Mirror of https://github.com/rclone/rclone.git (synced 2026-01-24 05:13:23 +00:00)

Compare commits: crypt-pass...adb-remote (77 commits)
Commit SHA1s:

c3e1a0f368, 2683939a4b, ed88ae878e, 2ba5c35e88, 240c15883f, 38864adc9c, 5991315990, 73f0a67d98,
ffe067d6e7, b5f563fb0f, 9310c7f3e2, 1c1a8ef24b, 2cfbc2852d, b167d30420, ec59760d9c, 076d3da825,
c3eecbe933, d8e5b19ed4, 43bc381e90, fb5ee22112, 35327dad6f, ef5e1909a0, bca5d8009e, 334f19c974,
42a5bf1d9f, 71d1890316, d29c545627, eb85ecc9c4, 0dc08e1e61, 76532408ef, 60a4a8a86d, a0d4c04687,
f3874707ee, f8c2689e77, 8ec55ae20b, fc1bf5f931, 578d00666c, f5c853b5c8, 23c0cd2482, 8217f361cc,
a0016e00d1, 99c37028ee, cfba337ef0, fd370fcad2, c680bb3254, 7d5d6c041f, bdc638530e, 315cee23a0,
2135879dda, da90069462, 08c4854e00, a838add230, d68b091170, d809bed438, 3aa1818870, 96f6708461,
6641a25f8c, cd46ce916b, 318d1bb6f9, b8b53901e8, 6e153781a7, f27c2d9760, eb91356e28, bed2971bf0,
f0696dfe30, a43ed567ee, fffdbb31f5, cacefb9a82, d966cef14c, a551978a3f, 97752ca8fb, 8d5d332daf,
6b3a9bf26a, c1d9a1e174, 98120bb864, f8ced557e3, 7b20139c6a
.golangci.yml (new file, 30 lines)

@@ -0,0 +1,30 @@
# golangci-lint configuration options

run:
  build-tags:
    - cmount

linters:
  enable:
    - deadcode
    - errcheck
    - goimports
    - golint
    - ineffassign
    - structcheck
    - varcheck
    - govet
    - unconvert
    #- prealloc
    #- maligned
  disable-all: true

issues:
  # Enable some lints excluded by default
  exclude-use-default: false

  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
  max-per-linter: 0

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0
@@ -1,14 +0,0 @@
{
    "Enable": [
        "deadcode",
        "errcheck",
        "goimports",
        "golint",
        "ineffassign",
        "structcheck",
        "varcheck",
        "vet"
    ],
    "EnableGC": true,
    "Vendor": true
}
@@ -8,6 +8,7 @@ go:
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12rc1
- tip
go_import_path: github.com/ncw/rclone
before_install:
@@ -24,6 +25,7 @@ script:
env:
global:
- GOTAGS=cmount
- GO111MODULE=off
- secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
- secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
- secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
@@ -42,7 +44,7 @@ matrix:
- go: tip
include:
- os: osx
go: 1.11.x
go: 1.12rc1
env: GOTAGS=""
cache:
directories:
@@ -54,5 +56,5 @@ deploy:
on:
repo: ncw/rclone
all_branches: true
go: 1.11.x
go: 1.12rc1
condition: $TRAVIS_PULL_REQUEST == false
MANUAL.html (1295 changed lines): file diff suppressed because it is too large.
MANUAL.txt (1525 changed lines): file diff suppressed because it is too large.
Makefile (28 changed lines)

@@ -11,7 +11,7 @@ ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
BRANCH_PATH :=
endif
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
ifneq ($(TAG),$(LAST_TAG))
TAG := $(TAG)-beta
endif
@@ -64,30 +64,20 @@ endif
# Do source code quality checks
check: rclone
ifdef FULL_TESTS
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
errcheck $(BUILDTAGS) ./...
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl|ApplicationCredentialId)' ; test $$? -eq 1
@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
@# see: https://github.com/golangci/golangci-lint/issues/204
@echo "-- START CODE QUALITY REPORT -------------------------------"
@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
@golangci-lint run ./...
@echo "-- END CODE QUALITY REPORT ---------------------------------"
else
@echo Skipping source quality tests as version of go too old
endif

gometalinter_install:
go get -u github.com/alecthomas/gometalinter
gometalinter --install --update

# We aren't using gometalinter as the default linter yet because
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
# 2. can't get -printfuncs working with the vet linter
gometalinter:
gometalinter ./...

# Get the build dependencies
build_dep:
ifdef FULL_TESTS
go get -u github.com/kisielk/errcheck
go get -u golang.org/x/tools/cmd/goimports
go get -u golang.org/x/lint/golint
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
endif

# Get the release dependencies
@@ -199,7 +189,7 @@ endif

travis_beta:
ifeq ($(TRAVIS_OS_NAME),linux)
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
endif
git log $(LAST_TAG).. > /tmp/git-log.txt
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
@@ -44,7 +44,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* Openstack Swift [:page_facing_up:](https://rclone.org/swift/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
@@ -62,13 +62,13 @@ Please see [the full list of all storage providers and their features](https://r

## Features

* MD5/SHA1 hashes checked at all times for file integrity
* MD5/SHA-1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts
* Can sync to and from network, e.g. two different cloud accounts
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
backend/adb/adb.go (new file, 821 lines)

@@ -0,0 +1,821 @@
|
||||
package adb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
adb "github.com/thinkhy/go-adb"
|
||||
"github.com/thinkhy/go-adb/wire"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "adb",
|
||||
Description: "Android Debug Bridge",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "serial",
|
||||
Help: "The device serial to use. Leave empty for auto selection.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "host",
|
||||
Default: "localhost",
|
||||
Help: "The ADB server host.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Default: 5037,
|
||||
Help: "The ADB server port.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "executable",
|
||||
Help: "The ADB executable path.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "copy_links",
|
||||
Help: "Follow symlinks and copy the pointed to item.",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Serial string
|
||||
Host string
|
||||
Port uint16
|
||||
Executable string
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
}
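The option names registered in init() above reach this Options struct through configstruct.Set in NewFs: a field is matched either by its `config` tag (copy_links) or, failing that, by the snake_case form of its field name. Below is a rough, non-authoritative sketch of that mapping, reusing configmap.Simple and configstruct.Set as they appear elsewhere in this diff; the trimmed Options type is purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
)

// Options is a trimmed, illustrative copy of the backend's Options struct above,
// kept to two fields to show how config keys map onto struct fields.
type Options struct {
	Host           string
	FollowSymlinks bool `config:"copy_links"`
}

func main() {
	// Values as they would come from the rclone config file or command line flags.
	m := configmap.Simple{
		"host":       "localhost",
		"copy_links": "true",
	}
	opt := new(Options)
	if err := configstruct.Set(m, opt); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opt) // &{Host:localhost FollowSymlinks:true}
}
```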
|
||||
|
||||
// Fs represents an ADB device
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
client *adb.Adb
|
||||
device *execDevice
|
||||
statFunc statFunc
|
||||
statFuncMu sync.Mutex
|
||||
touchFunc touchFunc
|
||||
touchFuncMu sync.Mutex
|
||||
}
|
||||
|
||||
// Object describes an ADB file
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("ADB root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if root == "" {
|
||||
root = "/"
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
statFunc: (*Object).statTry,
|
||||
touchFunc: (*Object).touchTry,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
|
||||
f.client, err = adb.NewWithConfig(adb.ServerConfig{
|
||||
Host: opt.Host,
|
||||
Port: int(opt.Port),
|
||||
PathToAdb: opt.Executable,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not configure ADB server")
|
||||
}
|
||||
err = f.client.StartServer()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not start ADB server")
|
||||
}
|
||||
|
||||
serverVersion, err := f.client.ServerVersion()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get ADB server version")
|
||||
}
|
||||
fs.Debugf(f, "ADB server version: 0x%X", serverVersion)
|
||||
|
||||
serials, err := f.client.ListDeviceSerials()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Could not get ADB devices")
|
||||
}
|
||||
descriptor := adb.AnyDevice()
|
||||
if opt.Serial != "" {
|
||||
descriptor = adb.DeviceWithSerial(opt.Serial)
|
||||
}
|
||||
if len(serials) > 1 && opt.Serial == "" {
|
||||
return nil, errors.New("Multiple ADB devices found. Use the serial config to select a specific device")
|
||||
}
|
||||
f.device = &execDevice{f.client.Device(descriptor)}
|
||||
|
||||
// follow symlinks for the root path
|
||||
entry, err := f.newEntryFollowSymlinks("")
|
||||
switch err {
|
||||
case nil:
|
||||
case fs.ErrorObjectNotFound:
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
switch entry.(type) {
|
||||
case fs.Object:
|
||||
f.root = path.Dir(f.root)
|
||||
return f, fs.ErrorIsFile
|
||||
case nil:
|
||||
return f, nil
|
||||
case fs.Directory:
|
||||
return f, nil
|
||||
default:
|
||||
return nil, errors.Errorf("Invalid root entry type %t", entry)
|
||||
}
|
||||
}
|
||||
|
||||
// Precision of the object storage system
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return 1 * time.Second
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
p := path.Join(f.root, dir)
|
||||
dirEntries, err := f.device.ListDirEntries(p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ListDirEntries")
|
||||
}
|
||||
|
||||
defer fs.CheckClose(dirEntries, &err)
|
||||
|
||||
found := false
|
||||
for dirEntries.Next() {
|
||||
found = true
|
||||
dirEntry := dirEntries.Entry()
|
||||
switch dirEntry.Name {
|
||||
case ".", "..":
|
||||
continue
|
||||
}
|
||||
fsEntry, err := f.entryForDirEntry(path.Join(dir, dirEntry.Name), dirEntry, f.opt.FollowSymlinks)
|
||||
if err != nil {
|
||||
fs.Errorf(p, "Listing error: %q: %v", dirEntry.Name, err)
|
||||
return nil, err
|
||||
} else if fsEntry != nil {
|
||||
entries = append(entries, fsEntry)
|
||||
} else {
|
||||
fs.Debugf(f, "Skipping DirEntry %#v", dirEntry)
|
||||
}
|
||||
}
|
||||
err = dirEntries.Err()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ListDirEntries")
|
||||
}
|
||||
if !found {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Fs) entryForDirEntry(remote string, e *adb.DirEntry, followSymlinks bool) (fs.DirEntry, error) {
|
||||
o := f.newObjectWithInfo(remote, e)
|
||||
// Follow symlinks if required
|
||||
if followSymlinks && (e.Mode&os.ModeSymlink) != 0 {
|
||||
err := f.statFunc(&o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if o.mode.IsDir() {
|
||||
return fs.NewDir(remote, o.modTime), nil
|
||||
}
|
||||
return &o, nil
|
||||
}
|
||||
|
||||
func (f *Fs) newEntry(remote string) (fs.DirEntry, error) {
|
||||
return f.newEntryWithFollow(remote, f.opt.FollowSymlinks)
|
||||
}
|
||||
func (f *Fs) newEntryFollowSymlinks(remote string) (fs.DirEntry, error) {
|
||||
return f.newEntryWithFollow(remote, true)
|
||||
}
|
||||
func (f *Fs) newEntryWithFollow(remote string, followSymlinks bool) (fs.DirEntry, error) {
|
||||
entry, err := f.device.Stat(path.Join(f.root, remote))
|
||||
if err != nil {
|
||||
if adb.HasErrCode(err, adb.FileNoExistError) {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return nil, errors.Wrapf(err, "Stat failed")
|
||||
}
|
||||
return f.entryForDirEntry(remote, entry, followSymlinks)
|
||||
}
|
||||
|
||||
func (f *Fs) newObjectWithInfo(remote string, e *adb.DirEntry) Object {
|
||||
return Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: int64(e.Size),
|
||||
mode: e.Mode,
|
||||
modTime: e.ModifiedAt,
|
||||
}
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
entry, err := f.newEntry(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
obj, ok := entry.(fs.Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
|
||||
// return an error or upload it properly (rather than e.g. calling panic).
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
// Temporary Object under construction - info filled in by Update()
|
||||
o := f.newObject(remote)
|
||||
err := o.Update(in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// newObject makes a half completed Object
|
||||
func (f *Fs) newObject(remote string) *Object {
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
p := path.Join(f.root, dir)
|
||||
output, code, err := f.device.execCommandWithExitCode("mkdir -p", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
entry, _ := f.newEntry(p)
|
||||
if _, ok := entry.(fs.Directory); ok {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("mkdir %q failed with %d: %q", dir, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "mkdir")
|
||||
}
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
p := path.Join(f.root, dir)
|
||||
output, code, err := f.device.execCommandWithExitCode("rmdir", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("rmdir %q failed with %d: %q", dir, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "rmdir")
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
// It should return a best guess if one isn't available
|
||||
func (o *Object) ModTime() time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Storable says whether this object can be stored
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the object to set the modification date
|
||||
func (o *Object) SetModTime(t time.Time) error {
|
||||
return o.fs.touchFunc(o, t)
|
||||
}
|
||||
|
||||
func (o *Object) stat() error {
|
||||
return o.statStatArg(statArgC, path.Join(o.fs.root, o.remote))
|
||||
}
|
||||
|
||||
func (o *Object) setMetadata(entry *adb.DirEntry) {
|
||||
// Don't overwrite the values if we don't need to
|
||||
// this avoids upsetting the race detector
|
||||
if o.size != int64(entry.Size) {
|
||||
o.size = int64(entry.Size)
|
||||
}
|
||||
if !o.modTime.Equal(entry.ModifiedAt) {
|
||||
o.modTime = entry.ModifiedAt
|
||||
}
|
||||
if o.mode != entry.Mode {
|
||||
o.mode = decodeEntryMode(uint32(entry.Mode))
|
||||
}
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
const blockSize = 1 << 12
|
||||
|
||||
var offset, count int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
if offset > o.size {
|
||||
offset = o.size
|
||||
}
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
} else if count+offset > o.size {
|
||||
count = o.size - offset
|
||||
}
|
||||
fs.Debugf(o, "Open: remote: %q offset: %d count: %d", o.remote, offset, count)
|
||||
|
||||
if count == 0 {
|
||||
return ioutil.NopCloser(bytes.NewReader(nil)), nil
|
||||
}
|
||||
offsetBlocks, offsetRest := offset/blockSize, offset%blockSize
|
||||
countBlocks := (count-1)/blockSize + 1
|
||||
|
||||
conn, err := o.fs.device.execCommand(fmt.Sprintf("sh -c 'dd \"if=$0\" bs=%d skip=%d count=%d 2>/dev/null'", blockSize, offsetBlocks, countBlocks), path.Join(o.fs.root, o.remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &adbReader{
|
||||
ReadCloser: readers.NewLimitedReadCloser(conn, count+offsetRest),
|
||||
skip: offsetRest,
|
||||
expected: count,
|
||||
}, nil
|
||||
}
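Open reads a byte range by shelling out to dd, which can only seek in whole blocks, so the requested offset is split into a block-aligned part (dd's skip=) and a remainder that the adbReader below discards from the front of the stream. A minimal standalone sketch of that arithmetic; the helper name is made up for illustration.

```go
package main

import "fmt"

// rangeToBlocks mirrors the arithmetic used in Open above: the byte offset is
// split into whole blocks for dd's skip= parameter plus a leading remainder,
// and the byte count is rounded up to a whole number of blocks.
func rangeToBlocks(offset, count, blockSize int64) (offsetBlocks, offsetRest, countBlocks int64) {
	offsetBlocks = offset / blockSize
	offsetRest = offset % blockSize
	countBlocks = (count-1)/blockSize + 1
	return
}

func main() {
	// Example: read 10 bytes starting at byte 5000 with the 4 KiB block size.
	offsetBlocks, offsetRest, countBlocks := rangeToBlocks(5000, 10, 1<<12)
	fmt.Printf("dd skip=%d count=%d, then discard %d leading bytes\n",
		offsetBlocks, countBlocks, offsetRest)
	// dd skip=1 count=1, then discard 904 leading bytes
}
```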
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
|
||||
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
|
||||
// return an error or update the object properly (rather than e.g. calling panic).
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
for _, option := range options {
|
||||
if option.Mandatory() {
|
||||
fs.Logf(option, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
written, err := o.writeToFile(path.Join(o.fs.root, o.remote), in, 0666, src.ModTime())
|
||||
if err != nil {
|
||||
if removeErr := o.Remove(); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
expected := src.Size()
|
||||
if expected == -1 {
|
||||
expected = written
|
||||
}
|
||||
for _, t := range []int64{100, 250, 500, 1000, 2500, 5000, 10000} {
|
||||
err = o.stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if o.size == expected {
|
||||
return nil
|
||||
}
|
||||
fs.Debugf(o, "Invalid size after update, expected: %d got: %d", expected, o.size)
|
||||
time.Sleep(time.Duration(t) * time.Millisecond)
|
||||
}
|
||||
return o.stat()
|
||||
}
|
||||
|
||||
// Remove this object
|
||||
func (o *Object) Remove() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("rm", p)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("rm %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "rm")
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) writeToFile(path string, rd io.Reader, perms os.FileMode, modeTime time.Time) (written int64, err error) {
|
||||
dst, err := o.fs.device.OpenWrite(path, perms, modeTime)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fs.CheckClose(dst, &err)
|
||||
return io.Copy(dst, rd)
|
||||
}
|
||||
|
||||
type statFunc func(*Object) error
|
||||
|
||||
func (o *Object) statTry() error {
|
||||
o.fs.statFuncMu.Lock()
|
||||
defer o.fs.statFuncMu.Unlock()
|
||||
|
||||
for _, f := range []statFunc{
|
||||
(*Object).statStatL, (*Object).statRealPath, (*Object).statReadLink,
|
||||
} {
|
||||
err := f(o)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "%s", err)
|
||||
} else {
|
||||
o.fs.statFunc = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("unable to resolve link target")
|
||||
}
|
||||
|
||||
const (
|
||||
statArgLc = "-Lc"
|
||||
statArgC = "-c"
|
||||
)
|
||||
|
||||
func (o *Object) statStatL() error {
|
||||
return o.statStatArg(statArgLc, path.Join(o.fs.root, o.remote))
|
||||
}
|
||||
|
||||
func (o *Object) statStatArg(arg, path string) error {
|
||||
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("stat %s %s", arg, "%f,%s,%Y"), path)
|
||||
output = strings.TrimSpace(output)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("stat %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "stat")
|
||||
}
|
||||
|
||||
parts := strings.Split(output, ",")
|
||||
if len(parts) != 3 {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
|
||||
mode, err := strconv.ParseUint(parts[0], 16, 32)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
size, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
modTime, err := strconv.ParseInt(parts[2], 10, 64)
|
||||
if err != nil {
|
||||
return errors.Errorf("stat %q invalid output %q", o.remote, output)
|
||||
}
|
||||
|
||||
o.size = int64(size)
|
||||
o.modTime = time.Unix(modTime, 0)
|
||||
o.mode = decodeEntryMode(uint32(mode))
|
||||
return nil
|
||||
}
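statStatArg asks the device's stat binary for "%f,%s,%Y", i.e. the raw mode in hex, the size in bytes and the modification time as a Unix timestamp, and then splits the reply on commas. A small sketch of the same parsing against a made-up sample reply.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	// Hypothetical reply of `stat -c "%f,%s,%Y" <path>` for a regular 0644 file.
	output := "81a4,2048,1549022400"

	parts := strings.Split(strings.TrimSpace(output), ",")
	if len(parts) != 3 {
		panic("unexpected stat output")
	}
	mode, _ := strconv.ParseUint(parts[0], 16, 32)   // raw mode in hex: 0x81a4 = S_IFREG | 0644
	size, _ := strconv.ParseUint(parts[1], 10, 64)   // size in bytes
	modTime, _ := strconv.ParseInt(parts[2], 10, 64) // seconds since the Unix epoch

	fmt.Printf("mode=%o size=%d modTime=%s\n", mode, size, time.Unix(modTime, 0).UTC())
	// mode=100644 size=2048 modTime=2019-02-01 12:00:00 +0000 UTC
}
```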
|
||||
|
||||
func (o *Object) statReadLink() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("readlink -f", p)
|
||||
output = strings.TrimSuffix(output, "\n")
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("readlink %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "readlink")
|
||||
}
|
||||
return o.statStatArg(statArgC, output)
|
||||
}
|
||||
func (o *Object) statRealPath() error {
|
||||
p := path.Join(o.fs.root, o.remote)
|
||||
output, code, err := o.fs.device.execCommandWithExitCode("realpath", p)
|
||||
output = strings.TrimSuffix(output, "\n")
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("realpath %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "realpath")
|
||||
}
|
||||
return o.statStatArg(statArgC, output)
|
||||
}
|
||||
|
||||
type touchFunc func(*Object, time.Time) error
|
||||
|
||||
func (o *Object) touchTry(t time.Time) error {
|
||||
o.fs.touchFuncMu.Lock()
|
||||
defer o.fs.touchFuncMu.Unlock()
|
||||
|
||||
for _, f := range []touchFunc{
|
||||
(*Object).touchCmd, (*Object).touchCd,
|
||||
} {
|
||||
err := f(o, t)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "%s", err)
|
||||
} else {
|
||||
o.fs.touchFunc = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return errors.Errorf("unable to resolve link target")
|
||||
}
|
||||
|
||||
const (
|
||||
touchArgCmd = "-cmd"
|
||||
touchArgCd = "-cd"
|
||||
)
|
||||
|
||||
func (o *Object) touchCmd(t time.Time) error {
|
||||
return o.touchStatArg(touchArgCmd, path.Join(o.fs.root, o.remote), t)
|
||||
}
|
||||
func (o *Object) touchCd(t time.Time) error {
|
||||
return o.touchStatArg(touchArgCd, path.Join(o.fs.root, o.remote), t)
|
||||
}
|
||||
|
||||
func (o *Object) touchStatArg(arg, path string, t time.Time) error {
|
||||
output, code, err := o.fs.device.execCommandWithExitCode(fmt.Sprintf("touch %s %s", arg, t.Format(time.RFC3339Nano)), path)
|
||||
output = strings.TrimSpace(output)
|
||||
switch err := err.(type) {
|
||||
case nil:
|
||||
case adb.ShellExitError:
|
||||
return errors.Errorf("touch %q failed with %d: %q", o.remote, code, output)
|
||||
default:
|
||||
return errors.Wrap(err, "touch")
|
||||
}
|
||||
|
||||
err = o.stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if diff, ok := checkTimeEqualWithPrecision(t, o.modTime, o.fs.Precision()); !ok {
|
||||
return errors.Errorf("touch %q to %s was ineffective: %d", o.remote, t, diff)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
|
||||
dt := t0.Sub(t1)
|
||||
if dt >= precision || dt <= -precision {
|
||||
return dt, false
|
||||
}
|
||||
return dt, true
|
||||
}
|
||||
|
||||
func decodeEntryMode(entryMode uint32) os.FileMode {
|
||||
const (
|
||||
unixIFBLK = 0x6000
|
||||
unixIFMT = 0xf000
|
||||
unixIFCHR = 0x2000
|
||||
unixIFDIR = 0x4000
|
||||
unixIFIFO = 0x1000
|
||||
unixIFLNK = 0xa000
|
||||
unixIFREG = 0x8000
|
||||
unixIFSOCK = 0xc000
|
||||
unixISGID = 0x400
|
||||
unixISUID = 0x800
|
||||
unixISVTX = 0x200
|
||||
)
|
||||
|
||||
mode := os.FileMode(entryMode & 0777)
|
||||
switch entryMode & unixIFMT {
|
||||
case unixIFBLK:
|
||||
mode |= os.ModeDevice
|
||||
case unixIFCHR:
|
||||
mode |= os.ModeDevice | os.ModeCharDevice
|
||||
case unixIFDIR:
|
||||
mode |= os.ModeDir
|
||||
case unixIFIFO:
|
||||
mode |= os.ModeNamedPipe
|
||||
case unixIFLNK:
|
||||
mode |= os.ModeSymlink
|
||||
case unixIFREG:
|
||||
// nothing to do
|
||||
case unixIFSOCK:
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
if entryMode&unixISGID != 0 {
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if entryMode&unixISUID != 0 {
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if entryMode&unixISVTX != 0 {
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
return mode
|
||||
}
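decodeEntryMode translates the raw Unix st_mode bits reported by the device into Go's os.FileMode flags: the low nine bits are the permissions and the bits under the 0xf000 mask select the file type. A short worked example with two illustrative raw values.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Two raw st_mode values as a device might report them (illustrative):
	//   0x41ed = S_IFDIR | 0755 -> a directory
	//   0x81a4 = S_IFREG | 0644 -> a regular file
	for _, raw := range []uint32{0x41ed, 0x81a4} {
		perm := os.FileMode(raw & 0777)  // permission bits
		isDir := raw&0xf000 == 0x4000    // unixIFMT / unixIFDIR, as in decodeEntryMode
		fmt.Printf("raw=%#x perm=%v dir=%v\n", raw, perm, isDir)
	}
	// raw=0x41ed perm=-rwxr-xr-x dir=true
	// raw=0x81a4 perm=-rw-r--r-- dir=false
}
```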
|
||||
|
||||
type execDevice struct {
|
||||
*adb.Device
|
||||
}
|
||||
|
||||
func (d *execDevice) execCommandWithExitCode(cmd string, arg string) (string, int, error) {
|
||||
cmdLine := fmt.Sprintf("sh -c '%s \"$0\"; echo :$?' '%s'", cmd, strings.Replace(arg, "'", "'\\''", -1))
|
||||
fs.Debugf("adb", "exec: %s", cmdLine)
|
||||
conn, err := d.execCommand(cmdLine)
|
||||
if err != nil {
|
||||
return "", -1, err
|
||||
}
|
||||
|
||||
resp, err := conn.ReadUntilEof()
|
||||
if err != nil {
|
||||
return "", -1, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
|
||||
outStr := string(resp)
|
||||
idx := strings.LastIndexByte(outStr, ':')
|
||||
if idx == -1 {
|
||||
return outStr, -1, fmt.Errorf("adb shell aborted, cannot parse exit code")
|
||||
}
|
||||
exitCode, _ := strconv.Atoi(strings.TrimSpace(outStr[idx+1:]))
|
||||
if exitCode != 0 {
|
||||
err = adb.ShellExitError{Command: cmdLine, ExitCode: exitCode}
|
||||
}
|
||||
return outStr[:idx], exitCode, err
|
||||
}
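The adb exec: service does not report the remote exit status, so execCommandWithExitCode wraps every command in sh -c '...; echo :$?' and recovers the status from the tail of the combined output. A minimal sketch of that split, using a made-up output string.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitExitCode mirrors the parsing in execCommandWithExitCode above: everything
// before the last ':' is the command output, and what follows is the exit status
// appended by the `echo :$?` wrapper.
func splitExitCode(raw string) (output string, code int, err error) {
	idx := strings.LastIndexByte(raw, ':')
	if idx == -1 {
		return raw, -1, fmt.Errorf("adb shell aborted, cannot parse exit code")
	}
	code, err = strconv.Atoi(strings.TrimSpace(raw[idx+1:]))
	return raw[:idx], code, err
}

func main() {
	out, code, err := splitExitCode("file1.txt\nfile2.txt\n:0\n")
	fmt.Printf("output=%q code=%d err=%v\n", out, code, err)
	// output="file1.txt\nfile2.txt\n" code=0 err=<nil>
}
```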
|
||||
|
||||
func (d *execDevice) execCommand(cmd string, args ...string) (*wire.Conn, error) {
|
||||
cmd = prepareCommandLineEscaped(cmd, args...)
|
||||
conn, err := d.Dial()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
defer func() {
|
||||
if err != nil && conn != nil {
|
||||
_ = conn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
req := fmt.Sprintf("exec:%s", cmd)
|
||||
|
||||
if err = conn.SendMessage([]byte(req)); err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
if _, err = conn.ReadStatus(req); err != nil {
|
||||
return nil, errors.Wrap(err, "ExecCommand")
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func prepareCommandLineEscaped(cmd string, args ...string) string {
|
||||
for i, arg := range args {
|
||||
args[i] = fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
|
||||
}
|
||||
|
||||
// Prepend the command to the args array.
|
||||
if len(args) > 0 {
|
||||
cmd = fmt.Sprintf("%s %s", cmd, strings.Join(args, " "))
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
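prepareCommandLineEscaped protects each argument with single quotes and escapes any embedded quote with the usual close-quote, escaped-quote, reopen-quote sequence, so arbitrary device paths survive the shell. A tiny example of the resulting string for a path that contains a quote.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same single-quote escaping as prepareCommandLineEscaped above.
	arg := "/sdcard/it's a file.txt"
	quoted := fmt.Sprintf("'%s'", strings.Replace(arg, "'", "'\\''", -1))
	fmt.Println(quoted)
	// '/sdcard/it'\''s a file.txt'
}
```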
|
||||
|
||||
type adbReader struct {
|
||||
io.ReadCloser
|
||||
skip int64
|
||||
read int64
|
||||
expected int64
|
||||
}
|
||||
|
||||
func (r *adbReader) Read(b []byte) (n int, err error) {
|
||||
n, err = r.ReadCloser.Read(b)
|
||||
if s := r.skip; n > 0 && s > 0 {
|
||||
_n := int64(n)
|
||||
if _n <= s {
|
||||
r.skip -= _n
|
||||
return r.Read(b)
|
||||
}
|
||||
r.skip = 0
|
||||
copy(b, b[s:n])
|
||||
n -= int(s)
|
||||
}
|
||||
r.read += int64(n)
|
||||
if err == io.EOF && r.read < r.expected {
|
||||
fs.Debugf("adb", "Read: read: %d expected: %d n: %d", r.read, r.expected, n)
|
||||
return n, io.ErrUnexpectedEOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
backend/adb/adb_test.go (new file, 20 lines)

@@ -0,0 +1,20 @@
// Test ADB filesystem interface
package adb_test

import (
	"testing"

	"github.com/ncw/rclone/backend/adb"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestAdb:/data/local/tmp",
		NilObject:  (*adb.Object)(nil),
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: "TestAdb", Key: "copy_links", Value: "true"},
		},
	})
}
@@ -30,7 +30,7 @@ type Options struct {
|
||||
Remote string `config:"remote"`
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path.
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
|
||||
@@ -80,7 +80,7 @@ func TestNewFS(t *testing.T) {
|
||||
wantEntry := test.entries[i]
|
||||
|
||||
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
||||
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
|
||||
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
|
||||
_, isDir := gotEntry.(fs.Directory)
|
||||
require.Equal(t, wantEntry.isDir, isDir, what)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package all
|
||||
|
||||
import (
|
||||
// Active file systems
|
||||
_ "github.com/ncw/rclone/backend/adb"
|
||||
_ "github.com/ncw/rclone/backend/alias"
|
||||
_ "github.com/ncw/rclone/backend/amazonclouddrive"
|
||||
_ "github.com/ncw/rclone/backend/azureblob"
|
||||
|
||||
@@ -77,7 +77,7 @@ func init() {
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to chunked upload (<= 256MB).",
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
@@ -85,7 +85,7 @@ func init() {
|
||||
|
||||
Note that this is stored in memory and there may be up to
|
||||
"--transfers" chunks stored at once in memory.`,
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "list_chunk",
|
||||
@@ -307,7 +307,7 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
|
||||
return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -392,7 +392,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, errors.New("Container name in SAS URL and container provided in command do not match")
|
||||
}
|
||||
|
||||
container = parts.ContainerName
|
||||
f.container = parts.ContainerName
|
||||
containerURL = azblob.NewContainerURL(*u, pipeline)
|
||||
} else {
|
||||
serviceURL = azblob.NewServiceURL(*u, pipeline)
|
||||
@@ -1038,7 +1038,7 @@ func (o *Object) decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetProper
|
||||
o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5())
|
||||
o.mimeType = info.ContentType()
|
||||
o.size = size
|
||||
o.modTime = time.Time(info.LastModified())
|
||||
o.modTime = info.LastModified()
|
||||
o.accessTier = azblob.AccessTierType(info.AccessTier())
|
||||
o.setMetadata(metadata)
|
||||
|
||||
@@ -1104,12 +1104,6 @@ func (o *Object) readMetaData() (err error) {
|
||||
return o.decodeMetaDataFromPropertiesResponse(blobProperties)
|
||||
}
|
||||
|
||||
// timeString returns modTime as the number of milliseconds
|
||||
// elapsed since January 1, 1970 UTC as a decimal string.
|
||||
func timeString(modTime time.Time) string {
|
||||
return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
|
||||
}
|
||||
|
||||
// parseTimeString converts a decimal string number of milliseconds
|
||||
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
|
||||
// the modTime variable.
|
||||
|
||||
@@ -17,12 +17,12 @@ type Error struct {
|
||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||
}
|
||||
|
||||
// Error statisfies the error interface
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||
}
|
||||
|
||||
// Fatal statisfies the Fatal interface
|
||||
// Fatal satisfies the Fatal interface
|
||||
//
|
||||
// It indicates which errors should be treated as fatal
|
||||
func (e *Error) Fatal() bool {
|
||||
@@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
|
||||
|
||||
// IsZero returns true if the timestamp is unitialised
|
||||
// IsZero returns true if the timestamp is uninitialized
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return time.Time(t).IsZero()
|
||||
}
|
||||
|
||||
@@ -108,7 +108,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
|
||||
Files above this size will be uploaded in chunks of "--b2-chunk-size".
|
||||
|
||||
This value should be set no larger than 4.657GiB (== 5GB).`,
|
||||
Default: fs.SizeSuffix(defaultUploadCutoff),
|
||||
Default: defaultUploadCutoff,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
@@ -117,14 +117,22 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
|
||||
When uploading large files, chunk the file into this size. Note that
|
||||
these chunks are buffered in memory and there might a maximum of
|
||||
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
|
||||
minimim size.`,
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
minimum size.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: `Disable checksums for large (> upload cutoff) files`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_url",
|
||||
Help: `Custom endpoint for downloads.
|
||||
|
||||
This is usually set to a Cloudflare CDN URL as Backblaze offers
|
||||
free egress for data downloaded through the Cloudflare network.
|
||||
Leave blank if you want to use the endpoint provided by Backblaze.`,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -140,6 +148,7 @@ type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DisableCheckSum bool `config:"disable_checksum"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
}
|
||||
|
||||
// Fs represents a remote b2 server
|
||||
@@ -319,7 +328,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -1296,9 +1305,17 @@ var _ io.ReadCloser = &openFile{}
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: o.fs.info.DownloadURL,
|
||||
Options: options,
|
||||
}
|
||||
|
||||
// Use downloadUrl from backblaze if downloadUrl is not set
|
||||
// otherwise use the custom downloadUrl
|
||||
if o.fs.opt.DownloadURL == "" {
|
||||
opts.RootURL = o.fs.info.DownloadURL
|
||||
} else {
|
||||
opts.RootURL = o.fs.opt.DownloadURL
|
||||
}
|
||||
|
||||
// Download by id if set otherwise by name
|
||||
if o.id != "" {
|
||||
opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
|
||||
@@ -1459,7 +1476,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
// Content-Type b2/x-auto to automatically set the stored Content-Type
|
||||
// post upload. In the case where a file extension is absent or the
|
||||
// lookup fails, the Content-Type is set to application/octet-stream. The
|
||||
// Content-Type mappings can be purused here.
|
||||
// Content-Type mappings can be pursued here.
|
||||
//
|
||||
// X-Bz-Content-Sha1
|
||||
// required
|
||||
|
||||
@@ -45,7 +45,7 @@ type Error struct {
|
||||
RequestID string `json:"request_id"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
|
||||
if e.Message != "" {
|
||||
@@ -57,7 +57,7 @@ func (e *Error) Error() string {
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// ItemFields are the fields needed for FileInfo
|
||||
|
||||
@@ -171,13 +171,13 @@ var retryErrorCodes = []int{
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
authRety := false
|
||||
authRetry := false
|
||||
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
authRety = true
|
||||
authRetry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// substitute reserved characters for box
|
||||
@@ -530,10 +530,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
switch err {
|
||||
case nil:
|
||||
return exisitingObj, exisitingObj.Update(in, src, options...)
|
||||
return existingObj, existingObj.Update(in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(in, src)
|
||||
|
||||
@@ -211,8 +211,8 @@ outer:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
if reqSize >= chunkSize {
|
||||
reqSize = chunkSize
|
||||
}
|
||||
|
||||
// Make a block of memory
|
||||
|
||||
backend/cache/cache.go (vendored, 8 changed lines)
@@ -576,7 +576,7 @@ The slice indices are similar to Python slices: start[:end]
|
||||
|
||||
start is the 0 based chunk number from the beginning of the file
|
||||
to fetch inclusive. end is 0 based chunk number from the beginning
|
||||
of the file to fetch exclisive.
|
||||
of the file to fetch exclusive.
|
||||
Both values can be negative, in which case they count from the back
|
||||
of the file. The value "-5:" represents the last 5 chunks of a file.
|
||||
|
||||
@@ -870,7 +870,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
|
||||
}
|
||||
}
|
||||
|
||||
// ChangeNotify can subsribe multiple callers
|
||||
// ChangeNotify can subscribe multiple callers
|
||||
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
|
||||
// and also notifies other caches (i.e VFS) to clear out whenever something changes
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
|
||||
@@ -1549,7 +1549,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is stil active
|
||||
// we check if the feature is still active
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantCopy
|
||||
@@ -1625,7 +1625,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
|
||||
// if this is a temp object then we perform the changes locally
|
||||
if srcObj.isTempFile() {
|
||||
// we check if the feature is stil active
|
||||
// we check if the feature is still active
|
||||
if f.opt.TempWritePath == "" {
|
||||
fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
|
||||
return nil, fs.ErrorCantMove
|
||||
|
||||
backend/cache/cache_internal_test.go (vendored, 31 changed lines)
@@ -387,10 +387,10 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
|
||||
// write the object
|
||||
o := runInstance.writeObjectBytes(t, cfs.UnWrap(), "data.bin", testData)
|
||||
require.Equal(t, o.Size(), int64(testSize))
|
||||
require.Equal(t, o.Size(), testSize)
|
||||
time.Sleep(time.Second * 3)
|
||||
|
||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, int64(testSize), false)
|
||||
checkSample, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(len(checkSample)), o.Size())
|
||||
|
||||
@@ -726,6 +726,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
|
||||
// Call the rc function
|
||||
m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, m, "status")
|
||||
require.Contains(t, m, "message")
|
||||
require.Equal(t, "ok", m["status"])
|
||||
@@ -735,18 +736,21 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
co, err = rootFs.NewObject("data.bin")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
|
||||
li1, err := runInstance.list(t, rootFs, "")
|
||||
_, err = runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testData2 := randStringBytes(int(chunkSize))
|
||||
runInstance.writeObjectBytes(t, cfs.UnWrap(), runInstance.encryptRemoteIfNeeded(t, "test2"), testData2)
|
||||
|
||||
// list should have 1 item only
|
||||
li1, err = runInstance.list(t, rootFs, "")
|
||||
li1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, li1, 1)
|
||||
|
||||
// Call the rc function
|
||||
m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, m, "status")
|
||||
require.Contains(t, m, "message")
|
||||
require.Equal(t, "ok", m["status"])
|
||||
@@ -754,6 +758,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
|
||||
|
||||
// list should have 2 items now
|
||||
li2, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, li2, 2)
|
||||
}
|
||||
|
||||
@@ -1490,7 +1495,8 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
var err error
|
||||
|
||||
if r.useMount {
|
||||
f, err := os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
var f *os.File
|
||||
f, err = os.OpenFile(path.Join(runInstance.mntDir, src), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1500,7 +1506,8 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
|
||||
}()
|
||||
_, err = f.WriteString(data + append)
|
||||
} else {
|
||||
obj1, err := rootFs.NewObject(src)
|
||||
var obj1 fs.Object
|
||||
obj1, err = rootFs.NewObject(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1632,15 +1639,13 @@ func (r *run) getCacheFs(f fs.Fs) (*cache.Fs, error) {
|
||||
cfs, ok := f.(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
} else {
|
||||
if f.Features().UnWrap != nil {
|
||||
cfs, ok := f.Features().UnWrap().(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
}
|
||||
}
|
||||
if f.Features().UnWrap != nil {
|
||||
cfs, ok := f.Features().UnWrap().(*cache.Fs)
|
||||
if ok {
|
||||
return cfs, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("didn't found a cache fs")
|
||||
}
|
||||
|
||||
|
||||
backend/cache/storage_persistent.go (vendored, 6 changed lines)
@@ -398,7 +398,7 @@ func (b *Persistent) AddObject(cachedObject *Object) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", cachedObject, err)
|
||||
}
|
||||
err = bucket.Put([]byte(cachedObject.Name), []byte(encoded))
|
||||
err = bucket.Put([]byte(cachedObject.Name), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", cachedObject, err)
|
||||
}
|
||||
@@ -809,7 +809,7 @@ func (b *Persistent) addPendingUpload(destPath string, started bool) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", destPath, err)
|
||||
}
|
||||
err = bucket.Put([]byte(destPath), []byte(encoded))
|
||||
err = bucket.Put([]byte(destPath), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
}
|
||||
@@ -1049,7 +1049,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't marshal object (%v) info: %v", queuedEntry, err)
|
||||
}
|
||||
err = bucket.Put([]byte(destPath), []byte(encoded))
|
||||
err = bucket.Put([]byte(destPath), encoded)
|
||||
if err != nil {
|
||||
return errors.Errorf("couldn't cache object (%v) info: %v", destPath, err)
|
||||
}
|
||||
|
||||
@@ -463,7 +463,7 @@ func (c *cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
||||
if int(newRune) < base {
|
||||
newRune += 256
|
||||
}
|
||||
_, _ = result.WriteRune(rune(newRune))
|
||||
_, _ = result.WriteRune(newRune)
|
||||
|
||||
default:
|
||||
_, _ = result.WriteRune(runeValue)
|
||||
@@ -748,7 +748,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
|
||||
if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
|
||||
return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
|
||||
}
|
||||
// retreive the nonce
|
||||
// retrieve the nonce
|
||||
fh.nonce.fromBuf(readBuf[fileMagicSize:])
|
||||
fh.initialNonce = fh.nonce
|
||||
return fh, nil
|
||||
|
||||
@@ -122,7 +122,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
|
||||
return newCipherForConfig(opt)
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -555,7 +555,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
|
||||
}
|
||||
|
||||
// ComputeHash takes the nonce from o, and encrypts the contents of
|
||||
// src with it, and calcuates the hash given by HashType on the fly
|
||||
// src with it, and calculates the hash given by HashType on the fly
|
||||
//
|
||||
// Note that we break lots of encapsulation in this function.
|
||||
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -481,7 +482,7 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry determines whehter a given err rates being retried
|
||||
// shouldRetry determines whether a given err rates being retried
|
||||
func shouldRetry(err error) (bool, error) {
|
||||
if err == nil {
|
||||
return false, nil
|
||||
@@ -862,7 +863,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -1339,17 +1340,46 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// listREntry is a task to be executed by a listRRunner
|
||||
type listREntry struct {
|
||||
id, path string
|
||||
}
|
||||
|
||||
// listRSlices is a helper struct to sort two slices at once
|
||||
type listRSlices struct {
|
||||
dirs []string
|
||||
paths []string
|
||||
}
|
||||
|
||||
func (s listRSlices) Sort() {
|
||||
sort.Sort(s)
|
||||
}
|
||||
|
||||
func (s listRSlices) Len() int {
|
||||
return len(s.dirs)
|
||||
}
|
||||
|
||||
func (s listRSlices) Swap(i, j int) {
|
||||
s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
|
||||
s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
|
||||
}
|
||||
|
||||
func (s listRSlices) Less(i, j int) bool {
|
||||
return s.dirs[i] < s.dirs[j]
|
||||
}
|
||||
|
||||
// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
|
||||
//
|
||||
// In each cycle, will wait up to 10ms to read up to grouping entries from the in channel.
|
||||
// In each cycle it will read up to grouping entries from the in channel without blocking.
|
||||
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
|
||||
// nil is sent to the out channel and the function returns.
|
||||
func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan string, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
|
||||
func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
|
||||
var dirs []string
|
||||
var paths []string
|
||||
|
||||
for dir := range in {
|
||||
dirs = append(dirs[:0], dir)
|
||||
wait := time.After(10 * time.Millisecond)
|
||||
dirs = append(dirs[:0], dir.id)
|
||||
paths = append(paths[:0], dir.path)
|
||||
waitloop:
|
||||
for i := 1; i < grouping; i++ {
|
||||
select {
|
||||
@@ -1357,31 +1387,32 @@ func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan string, out chan<- error,
|
||||
if !ok {
|
||||
break waitloop
|
||||
}
|
||||
dirs = append(dirs, d)
|
||||
case <-wait:
|
||||
break waitloop
|
||||
dirs = append(dirs, d.id)
|
||||
paths = append(paths, d.path)
|
||||
default:
|
||||
}
|
||||
}
|
||||
listRSlices{dirs, paths}.Sort()
|
||||
var iErr error
|
||||
_, err := f.list(dirs, "", false, false, false, func(item *drive.File) bool {
|
||||
parentPath := ""
|
||||
if len(item.Parents) > 0 {
|
||||
p, ok := f.dirCache.GetInv(item.Parents[0])
|
||||
if ok {
|
||||
parentPath = p
|
||||
for _, parent := range item.Parents {
|
||||
// only handle parents that are in the requested dirs list
|
||||
i := sort.SearchStrings(dirs, parent)
|
||||
if i == len(dirs) || dirs[i] != parent {
|
||||
continue
|
||||
}
|
||||
remote := path.Join(paths[i], item.Name)
|
||||
entry, err := f.itemToDirEntry(remote, item)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
}
|
||||
remote := path.Join(parentPath, item.Name)
|
||||
entry, err := f.itemToDirEntry(remote, item)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
|
||||
err = cb(entry)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
err = cb(entry)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
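A hedged, self-contained sketch of the batching pattern the comment above describes: the first receive blocks, then up to grouping-1 more entries are taken only if they are already waiting, using select with a default case (string stands in for the real listREntry type).

// batchReader gathers up to grouping entries per cycle: the first receive
// blocks, the rest are taken only if immediately available in the channel.
func batchReader(in <-chan string, grouping int, process func(batch []string)) {
	for first := range in {
		batch := []string{first}
	fill:
		for len(batch) < grouping {
			select {
			case next, ok := <-in:
				if !ok {
					break fill
				}
				batch = append(batch, next)
			default:
				break fill
			}
		}
		process(batch)
	}
}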
@@ -1432,30 +1463,44 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if directoryID == "root" {
|
||||
var info *drive.File
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
info, err = f.svc.Files.Get("root").
|
||||
Fields("id").
|
||||
SupportsTeamDrives(f.isTeamDrive).
|
||||
Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
directoryID = info.Id
|
||||
}
|
||||
|
||||
mu := sync.Mutex{} // protects in and overflow
|
||||
wg := sync.WaitGroup{}
|
||||
in := make(chan string, inputBuffer)
|
||||
in := make(chan listREntry, inputBuffer)
|
||||
out := make(chan error, fs.Config.Checkers)
|
||||
list := walk.NewListRHelper(callback)
|
||||
overfflow := []string{}
|
||||
overflow := []listREntry{}
|
||||
|
||||
cb := func(entry fs.DirEntry) error {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if d, isDir := entry.(*fs.Dir); isDir && in != nil {
|
||||
select {
|
||||
case in <- d.ID():
|
||||
case in <- listREntry{d.ID(), d.Remote()}:
|
||||
wg.Add(1)
|
||||
default:
|
||||
overfflow = append(overfflow, d.ID())
|
||||
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
|
||||
}
|
||||
}
|
||||
return list.Add(entry)
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
in <- directoryID
|
||||
in <- listREntry{directoryID, dir}
|
||||
|
||||
for i := 0; i < fs.Config.Checkers; i++ {
|
||||
go f.listRRunner(&wg, in, out, cb, grouping)
|
||||
@@ -1464,18 +1509,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
// wait until all the directories are processed
|
||||
wg.Wait()
|
||||
// if the input channel overflowed add the collected entries to the channel now
|
||||
for len(overfflow) > 0 {
|
||||
for len(overflow) > 0 {
|
||||
mu.Lock()
|
||||
l := len(overfflow)
|
||||
// only fill half of the channel to prevent entries beeing put into overfflow again
|
||||
l := len(overflow)
|
||||
// only fill half of the channel to prevent entries being put into overflow again
|
||||
if l > inputBuffer/2 {
|
||||
l = inputBuffer / 2
|
||||
}
|
||||
wg.Add(l)
|
||||
for _, d := range overfflow[:l] {
|
||||
for _, d := range overflow[:l] {
|
||||
in <- d
|
||||
}
|
||||
overfflow = overfflow[l:]
|
||||
overflow = overflow[l:]
|
||||
mu.Unlock()
|
||||
|
||||
// wait again for the completion of all directories
|
||||
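A rough sketch of the half-refill step above, assuming the listREntry type from this diff; the helper itself is illustrative (it is not in the commit) and needs the sync package. Parking entries in a slice and feeding them back in halves avoids a deadlock where a worker blocks sending to the full in channel while every other worker is waiting for it.

// refillHalf feeds at most half of the channel's capacity back from the
// parked overflow entries and returns whatever is still parked. Filling only
// half leaves room for the workers' own sends, so entries are unlikely to be
// pushed straight back into overflow.
func refillHalf(in chan<- listREntry, overflow []listREntry, wg *sync.WaitGroup) []listREntry {
	n := len(overflow)
	if half := cap(in) / 2; n > half {
		n = half
	}
	wg.Add(n)
	for _, d := range overflow[:n] {
		in <- d
	}
	return overflow[n:]
}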
@@ -1666,14 +1711,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.Name, srcDir)
|
||||
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
|
||||
}
|
||||
}
|
||||
// rmdir (into trash) the now empty source directory
|
||||
fs.Infof(srcDir, "removing empty directory")
|
||||
err = f.rmdir(srcDir.ID(), true)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
|
||||
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -2092,7 +2137,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
// If the implementation uses polling, it should adhere to the given interval.
|
||||
//
|
||||
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
||||
// Automatically restarts itself in case of unexpected behavior of the remote.
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
@@ -2199,11 +2244,13 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), startPage
|
||||
|
||||
// translate the parent dir of this object
|
||||
if len(change.File.Parents) > 0 {
|
||||
if parentPath, ok := f.dirCache.GetInv(change.File.Parents[0]); ok {
|
||||
// and append the drive file name to compute the full file name
|
||||
newPath := path.Join(parentPath, change.File.Name)
|
||||
// this will now clear the actual file too
|
||||
pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
|
||||
for _, parent := range change.File.Parents {
|
||||
if parentPath, ok := f.dirCache.GetInv(parent); ok {
|
||||
// and append the drive file name to compute the full file name
|
||||
newPath := path.Join(parentPath, change.File.Name)
|
||||
// this will now clear the actual file too
|
||||
pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
|
||||
}
|
||||
}
|
||||
} else { // a true root object that is changed
|
||||
pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
|
||||
@@ -2586,6 +2633,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return err
|
||||
}
|
||||
newO, err := o.fs.newObjectWithInfo(src.Remote(), info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch newO := newO.(type) {
|
||||
case *Object:
|
||||
*o = *newO
|
||||
@@ -2624,6 +2674,9 @@ func (o *documentObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.O
|
||||
remote = remote[:len(remote)-o.extLen]
|
||||
|
||||
newO, err := o.fs.newObjectWithInfo(remote, info)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch newO := newO.(type) {
|
||||
case *documentObject:
|
||||
*o = *newO
|
||||
|
||||
@@ -185,7 +185,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
|
||||
// been 200 OK.
|
||||
//
|
||||
// So parse the response out of the body. We aren't expecting
|
||||
// any other 2xx codes, so we parse it unconditionaly on
|
||||
// any other 2xx codes, so we parse it unconditionally on
|
||||
// StatusCode
|
||||
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
|
||||
return 598, err
|
||||
|
||||
@@ -130,8 +130,8 @@ Any files larger than this will be uploaded in chunks of this size.
|
||||
Note that chunks are buffered in memory (one at a time) so rclone can
|
||||
deal with retries. Setting this larger will increase the speed
|
||||
slightly (at most 10%% for 128MB in tests) at the cost of using more
|
||||
memory. It can be set smaller if you are tight on memory.`, fs.SizeSuffix(maxChunkSize)),
|
||||
Default: fs.SizeSuffix(defaultChunkSize),
|
||||
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
@@ -213,8 +213,8 @@ func shouldRetry(err error) (bool, error) {
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
// Keep old behaviour for backward compatibility
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
|
||||
// Keep old behavior for backward compatibility
|
||||
if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") || baseErrString == "" {
|
||||
return true, err
|
||||
}
|
||||
return fserrors.ShouldRetry(err), err
|
||||
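For reference, the retry classification in this hunk boils down to a substring check; a minimal sketch (the helper name is illustrative and it needs the strings package, but the error strings come from the diff itself).

// retryableDropboxError reports whether a base error string from the SDK
// looks transient; an empty string is treated as retryable, matching the
// change above.
func retryableDropboxError(baseErrString string) bool {
	return baseErrString == "" ||
		strings.Contains(baseErrString, "too_many_write_operations") ||
		strings.Contains(baseErrString, "too_many_requests")
}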
@@ -239,7 +239,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
|
||||
return
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
|
||||
@@ -166,7 +166,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
f.poolMu.Unlock()
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
|
||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||
// Parse config into Options struct
|
||||
|
||||
@@ -162,21 +162,36 @@ func init() {
|
||||
}, {
|
||||
Value: "asia-east1",
|
||||
Help: "Taiwan.",
|
||||
}, {
|
||||
Value: "asia-east2",
|
||||
Help: "Hong Kong.",
|
||||
}, {
|
||||
Value: "asia-northeast1",
|
||||
Help: "Tokyo.",
|
||||
}, {
|
||||
Value: "asia-south1",
|
||||
Help: "Mumbai.",
|
||||
}, {
|
||||
Value: "asia-southeast1",
|
||||
Help: "Singapore.",
|
||||
}, {
|
||||
Value: "australia-southeast1",
|
||||
Help: "Sydney.",
|
||||
}, {
|
||||
Value: "europe-north1",
|
||||
Help: "Finland.",
|
||||
}, {
|
||||
Value: "europe-west1",
|
||||
Help: "Belgium.",
|
||||
}, {
|
||||
Value: "europe-west2",
|
||||
Help: "London.",
|
||||
}, {
|
||||
Value: "europe-west3",
|
||||
Help: "Frankfurt.",
|
||||
}, {
|
||||
Value: "europe-west4",
|
||||
Help: "Netherlands.",
|
||||
}, {
|
||||
Value: "us-central1",
|
||||
Help: "Iowa.",
|
||||
@@ -189,6 +204,9 @@ func init() {
|
||||
}, {
|
||||
Value: "us-west1",
|
||||
Help: "Oregon.",
|
||||
}, {
|
||||
Value: "us-west2",
|
||||
Help: "California.",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
@@ -282,7 +300,7 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry determines whehter a given err rates being retried
|
||||
// shouldRetry determines whether a given err rates being retried
|
||||
func shouldRetry(err error) (again bool, errOut error) {
|
||||
again = false
|
||||
if err != nil {
|
||||
@@ -330,7 +348,7 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
var oAuthClient *http.Client
|
||||
|
||||
|
||||
@@ -40,6 +40,9 @@ func init() {
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}, {
|
||||
Value: "https://user:pass@example.com",
|
||||
Help: "Connect to example.com using a username and password",
|
||||
}},
|
||||
}},
|
||||
}
|
||||
@@ -248,7 +251,7 @@ func parseName(base *url.URL, name string) (string, error) {
|
||||
}
|
||||
// calculate the name relative to the base
|
||||
name = u.Path[len(base.Path):]
|
||||
// musn't be empty
|
||||
// mustn't be empty
|
||||
if name == "" {
|
||||
return "", errNameIsEmpty
|
||||
}
|
||||
|
||||
@@ -9,8 +9,10 @@ package hubic
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/swift"
|
||||
@@ -124,7 +126,9 @@ func (f *Fs) getCredentials() (err error) {
|
||||
}
|
||||
defer fs.CheckClose(resp.Body, &err)
|
||||
if resp.StatusCode < 200 || resp.StatusCode > 299 {
|
||||
return errors.Errorf("failed to get credentials: %s", resp.Status)
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
bodyStr := strings.TrimSpace(strings.Replace(string(body), "\n", " ", -1))
|
||||
return errors.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
|
||||
}
|
||||
decoder := json.NewDecoder(resp.Body)
|
||||
var result credentials
|
||||
|
||||
@@ -40,7 +40,7 @@ const (
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
defaultDevice = "Jotta"
|
||||
defaultMountpoint = "Sync"
|
||||
defaultMountpoint = "Sync" // nolint
|
||||
rootURL = "https://www.jottacloud.com/jfs/"
|
||||
apiURL = "https://api.jottacloud.com/files/v1/"
|
||||
baseURL = "https://www.jottacloud.com/"
|
||||
@@ -103,7 +103,7 @@ func init() {
|
||||
var jsonToken api.TokenJSON
|
||||
resp, err := srv.CallJSON(&opts, nil, &jsonToken)
|
||||
if err != nil {
|
||||
// if 2fa is enabled the first request is expected to fail. we'lls do another request with the 2fa code as an additional http header
|
||||
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
|
||||
if resp != nil {
|
||||
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
|
||||
fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n")
|
||||
@@ -163,7 +163,7 @@ func init() {
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_resume_limit",
|
||||
Help: "Files bigger than this can be resumed if the upload failes.",
|
||||
Help: "Files bigger than this can be resumed if the upload fail's.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
Advanced: true,
|
||||
}},
|
||||
@@ -361,7 +361,7 @@ func grantTypeFilter(req *http.Request) {
|
||||
}
|
||||
_ = req.Body.Close()
|
||||
|
||||
// make the refesh token upper case
|
||||
// make the refresh token upper case
|
||||
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
|
||||
|
||||
// set the new ReadCloser (with a dummy Close())
|
||||
@@ -769,7 +769,7 @@ func (f *Fs) Purge() error {
|
||||
return f.purgeCheck("", false)
|
||||
}
|
||||
|
||||
// copyOrMoves copys or moves directories or files depending on the mthod parameter
|
||||
// copyOrMoves copies or moves directories or files depending on the method parameter
|
||||
func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
@@ -1006,7 +1006,7 @@ func (o *Object) MimeType() string {
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.JottaFile) (err error) {
|
||||
o.hasMetaData = true
|
||||
o.size = int64(info.Size)
|
||||
o.size = info.Size
|
||||
o.md5 = info.MD5
|
||||
o.mimeType = info.MimeType
|
||||
o.modTime = time.Time(info.ModifiedAt)
|
||||
@@ -1080,7 +1080,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
|
||||
// we need a MD5
|
||||
md5Hasher := md5.New()
|
||||
// use the teeReader to write to the local file AND caclulate the MD5 while doing so
|
||||
// use the teeReader to write to the local file AND calculate the MD5 while doing so
|
||||
teeReader := io.TeeReader(in, md5Hasher)
|
||||
|
||||
// nothing to clean up by default
|
||||
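The teeReader trick above is plain standard-library plumbing; a small self-contained illustration (not Jottacloud-specific) of hashing a stream while it is being read.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("example payload")
	hasher := md5.New()
	// every byte read through tee is also written into hasher
	tee := io.TeeReader(src, hasher)
	data, _ := ioutil.ReadAll(tee) // stands in for writing the spool file
	fmt.Printf("read %d bytes, md5=%s\n", len(data), hex.EncodeToString(hasher.Sum(nil)))
}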
@@ -1212,7 +1212,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
|
||||
// finally update the meta data
|
||||
o.hasMetaData = true
|
||||
o.size = int64(result.Bytes)
|
||||
o.size = result.Bytes
|
||||
o.md5 = result.Md5
|
||||
o.modTime = time.Unix(result.Modified/1000, 0)
|
||||
} else {
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
Translate file names for JottaCloud adapted from OneDrive
|
||||
|
||||
|
||||
The following characters are JottaClous reserved characters, and can't
|
||||
The following characters are JottaCloud reserved characters, and can't
|
||||
be used in JottaCloud folder and file names.
|
||||
|
||||
jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
|
||||
|
||||
@@ -16,7 +16,7 @@ func (f *Fs) About() (*fs.Usage, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read disk usage")
|
||||
}
|
||||
bs := int64(s.Bsize)
|
||||
bs := int64(s.Bsize) // nolint: unconvert
|
||||
usage := &fs.Usage{
|
||||
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
|
||||
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
|
||||
|
||||
@@ -225,10 +225,10 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// caseInsenstive returns whether the remote is case insensitive or not
|
||||
// caseInsensitive returns whether the remote is case insensitive or not
|
||||
func (f *Fs) caseInsensitive() bool {
|
||||
// FIXME not entirely accurate since you can have case
|
||||
// sensitive Fses on darwin and case insenstive Fses on linux.
|
||||
// sensitive Fses on darwin and case insensitive Fses on linux.
|
||||
// Should probably check but that would involve creating a
|
||||
// file in the remote to be most accurate which probably isn't
|
||||
// desirable.
|
||||
@@ -288,7 +288,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// Handle the odd case, that a symlink was specfied by name without the link suffix
|
||||
// Handle the odd case, that a symlink was specified by name without the link suffix
|
||||
if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
@@ -958,7 +958,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
|
||||
if o.translatedLink {
|
||||
if err == nil {
|
||||
// Remove any current symlink or file, if one exsits
|
||||
// Remove any current symlink or file, if one exists
|
||||
if _, err := os.Lstat(o.path); err == nil {
|
||||
if removeErr := os.Remove(o.path); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
|
||||
|
||||
@@ -22,5 +22,5 @@ func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
|
||||
fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys())
|
||||
return devUnset
|
||||
}
|
||||
return uint64(statT.Dev)
|
||||
return uint64(statT.Dev) // nolint: unconvert
|
||||
}
|
||||
|
||||
@@ -497,7 +497,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
// must have setMetaData called on it
|
||||
//
|
||||
// Returns the dirNode, obect, leaf and error
|
||||
// Returns the dirNode, object, leaf and error
|
||||
//
|
||||
// Used to create new objects
|
||||
func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
|
||||
@@ -523,10 +523,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
|
||||
switch err {
|
||||
case nil:
|
||||
return exisitingObj, exisitingObj.Update(in, src, options...)
|
||||
return existingObj, existingObj.Update(in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
return f.PutUnchecked(in, src)
|
||||
@@ -847,14 +847,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.GetName(), srcDir)
|
||||
return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
|
||||
}
|
||||
}
|
||||
// rmdir (into trash) the now empty source directory
|
||||
fs.Infof(srcDir, "removing empty directory")
|
||||
err = f.deleteNode(srcDirNode)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
|
||||
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -1076,6 +1076,9 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
size := src.Size()
|
||||
if size < 0 {
|
||||
return errors.New("mega backend can't upload a file of unknown length")
|
||||
}
|
||||
//modTime := src.ModTime()
|
||||
remote := o.Remote()
|
||||
|
||||
@@ -1126,7 +1129,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return errors.Wrap(err, "failed to finish upload")
|
||||
}
|
||||
|
||||
// If the upload succeded and the original object existed, then delete it
|
||||
// If the upload succeeded and the original object existed, then delete it
|
||||
if o.info != nil {
|
||||
err = o.fs.deleteNode(o.info)
|
||||
if err != nil {
|
||||
|
||||
@@ -25,7 +25,7 @@ type Error struct {
|
||||
} `json:"error"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := e.ErrorInfo.Code
|
||||
if e.ErrorInfo.InnerError.Code != "" {
|
||||
@@ -35,7 +35,7 @@ func (e *Error) Error() string {
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Identity represents an identity of an actor. For example, an actor
|
||||
@@ -295,9 +295,9 @@ func (i *Item) GetID() string {
|
||||
return i.ID
|
||||
}
|
||||
|
||||
// GetDriveID returns a normalized ParentReferance of the item
|
||||
// GetDriveID returns a normalized ParentReference of the item
|
||||
func (i *Item) GetDriveID() string {
|
||||
return i.GetParentReferance().DriveID
|
||||
return i.GetParentReference().DriveID
|
||||
}
|
||||
|
||||
// GetName returns a normalized Name of the item
|
||||
@@ -398,8 +398,8 @@ func (i *Item) GetLastModifiedDateTime() Timestamp {
|
||||
return i.LastModifiedDateTime
|
||||
}
|
||||
|
||||
// GetParentReferance returns a normalized ParentReferance of the item
|
||||
func (i *Item) GetParentReferance() *ItemReference {
|
||||
// GetParentReference returns a normalized ParentReference of the item
|
||||
func (i *Item) GetParentReference() *ItemReference {
|
||||
if i.IsRemote() && i.ParentReference == nil {
|
||||
return i.RemoteItem.ParentReference
|
||||
}
|
||||
|
||||
@@ -227,7 +227,7 @@ that the chunks will be buffered into memory.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_type",
|
||||
Help: "The type of the drive ( personal | business | documentLibrary )",
|
||||
Help: "The type of the drive ( " + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + " )",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -324,13 +324,13 @@ var retryErrorCodes = []int{
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
authRety := false
|
||||
authRetry := false
|
||||
|
||||
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
|
||||
authRety = true
|
||||
authRetry = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
|
||||
@@ -1488,7 +1488,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
if size <= 0 {
|
||||
panic("size passed into uploadMultipart must be > 0")
|
||||
return nil, errors.New("unknown-sized upload not supported")
|
||||
}
|
||||
|
||||
// Create upload session
|
||||
@@ -1535,7 +1535,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
||||
// This function will set modtime after uploading, which will create a new version for the remote file
|
||||
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
|
||||
panic("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
|
||||
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Starting singlepart upload")
|
||||
@@ -1602,7 +1602,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
} else if size == 0 {
|
||||
info, err = o.uploadSinglepart(in, size, modTime)
|
||||
} else {
|
||||
panic("src file size must be >= 0")
|
||||
return errors.New("unknown-sized upload not supported")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -119,7 +119,7 @@ func (f *Fs) DirCacheFlush() {
|
||||
f.dirCache.ResetRoot()
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, bucket:path
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
@@ -785,7 +785,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
remote := path.Join(dir, folder.Name)
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, folder.FolderID)
|
||||
d := fs.NewDir(remote, time.Unix(int64(folder.DateModified), 0)).SetID(folder.FolderID)
|
||||
d := fs.NewDir(remote, time.Unix(folder.DateModified, 0)).SetID(folder.FolderID)
|
||||
d.SetItems(int64(folder.ChildFolders))
|
||||
entries = append(entries, d)
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ type Error struct {
|
||||
} `json:"error"`
|
||||
}
|
||||
|
||||
// Error statisfies the error interface
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ type Error struct {
|
||||
ErrorString string `json:"error"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("pcloud error: %s (%d)", e.ErrorString, e.Result)
|
||||
}
|
||||
@@ -58,7 +58,7 @@ func (e *Error) Update(err error) error {
|
||||
return e
|
||||
}
|
||||
|
||||
// Check Error statisfies the error interface
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Item describes a folder or a file as returned by Get Folder Items and others
|
||||
@@ -161,7 +161,6 @@ type UserInfo struct {
|
||||
PublicLinkQuota int64 `json:"publiclinkquota"`
|
||||
Email string `json:"email"`
|
||||
UserID int `json:"userid"`
|
||||
Result int `json:"result"`
|
||||
Quota int64 `json:"quota"`
|
||||
TrashRevretentionDays int `json:"trashrevretentiondays"`
|
||||
Premium bool `json:"premium"`
|
||||
|
||||
@@ -385,7 +385,7 @@ func fileIDtoNumber(fileID string) string {
|
||||
if len(fileID) > 0 && fileID[0] == 'f' {
|
||||
return fileID[1:]
|
||||
}
|
||||
fs.Debugf(nil, "Invalid filee id %q", fileID)
|
||||
fs.Debugf(nil, "Invalid file id %q", fileID)
|
||||
return fileID
|
||||
}
|
||||
|
||||
|
||||
@@ -449,7 +449,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
_, err = bucketInit.PutObject(key, &req)
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Copied Faild, API Error: %v", err)
|
||||
fs.Debugf(f, "Copy Failed, API Error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return f.NewObject(remote)
|
||||
@@ -756,7 +756,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
}
|
||||
switch *statistics.Status {
|
||||
case "deleted":
|
||||
fs.Debugf(f, "Wiat for qingstor sync bucket status, retries: %d", retries)
|
||||
fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
|
||||
time.Sleep(time.Second * 1)
|
||||
retries++
|
||||
continue
|
||||
@@ -875,7 +875,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
fs.Debugf(o, "Read metadata of key: %s", key)
|
||||
resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Read metadata faild, API Error: %v", err)
|
||||
fs.Debugf(o, "Read metadata failed, API Error: %v", err)
|
||||
if e, ok := err.(*qsErr.QingStorError); ok {
|
||||
if e.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
|
||||
@@ -143,7 +143,7 @@ func (u *uploader) init() {
|
||||
|
||||
// Try to adjust partSize if it is too small and account for
|
||||
// integer division truncation.
|
||||
if u.totalSize/u.cfg.partSize >= int64(u.cfg.partSize) {
|
||||
if u.totalSize/u.cfg.partSize >= u.cfg.partSize {
|
||||
// Add one to the part size to account for remainders
|
||||
// during the size calculation. e.g odd number of bytes.
|
||||
u.cfg.partSize = (u.totalSize / int64(u.cfg.maxUploadParts)) + 1
|
||||
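A worked example of why the +1 matters (illustrative numbers, not from the diff):

totalSize := int64(10000000001)          // just over 10 GB
maxUploadParts := int64(10000)           // hard limit on the part count
partSize := totalSize/maxUploadParts + 1 // 1000001 bytes
// 10000 parts * 1000001 bytes = 10000010000 bytes >= totalSize, whereas the
// truncated 1000000-byte part size would have needed a 10001st part.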
@@ -163,7 +163,7 @@ func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {
|
||||
|
||||
_, err := bucketInit.PutObject(u.cfg.key, &req)
|
||||
if err == nil {
|
||||
fs.Debugf(u, "Upload single objcet finished")
|
||||
fs.Debugf(u, "Upload single object finished")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -131,6 +131,9 @@ func init() {
|
||||
}, {
|
||||
Value: "eu-west-2",
|
||||
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
|
||||
}, {
|
||||
Value: "eu-north-1",
|
||||
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
|
||||
}, {
|
||||
Value: "eu-central-1",
|
||||
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
|
||||
@@ -234,10 +237,10 @@ func init() {
|
||||
Help: "EU Cross Region Amsterdam Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-gb.objectstorage.softlayer.net",
|
||||
Help: "Great Britan Endpoint",
|
||||
Help: "Great Britain Endpoint",
|
||||
}, {
|
||||
Value: "s3.eu-gb.objectstorage.service.networklayer.com",
|
||||
Help: "Great Britan Private Endpoint",
|
||||
Help: "Great Britain Private Endpoint",
|
||||
}, {
|
||||
Value: "s3.ap-geo.objectstorage.softlayer.net",
|
||||
Help: "APAC Cross Regional Endpoint",
|
||||
@@ -343,7 +346,7 @@ func init() {
|
||||
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,Alibaba",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-west-1.dream.io",
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
Provider: "Dreamhost",
|
||||
}, {
|
||||
@@ -392,6 +395,9 @@ func init() {
|
||||
}, {
|
||||
Value: "eu-west-2",
|
||||
Help: "EU (London) Region.",
|
||||
}, {
|
||||
Value: "eu-north-1",
|
||||
Help: "EU (Stockholm) Region.",
|
||||
}, {
|
||||
Value: "EU",
|
||||
Help: "EU Region.",
|
||||
@@ -444,7 +450,7 @@ func init() {
|
||||
Help: "US East Region Flex",
|
||||
}, {
|
||||
Value: "us-south-standard",
|
||||
Help: "US Sout hRegion Standard",
|
||||
Help: "US South Region Standard",
|
||||
}, {
|
||||
Value: "us-south-vault",
|
||||
Help: "US South Region Vault",
|
||||
@@ -468,16 +474,16 @@ func init() {
|
||||
Help: "EU Cross Region Flex",
|
||||
}, {
|
||||
Value: "eu-gb-standard",
|
||||
Help: "Great Britan Standard",
|
||||
Help: "Great Britain Standard",
|
||||
}, {
|
||||
Value: "eu-gb-vault",
|
||||
Help: "Great Britan Vault",
|
||||
Help: "Great Britain Vault",
|
||||
}, {
|
||||
Value: "eu-gb-cold",
|
||||
Help: "Great Britan Cold",
|
||||
Help: "Great Britain Cold",
|
||||
}, {
|
||||
Value: "eu-gb-flex",
|
||||
Help: "Great Britan Flex",
|
||||
Help: "Great Britain Flex",
|
||||
}, {
|
||||
Value: "ap-standard",
|
||||
Help: "APAC Standard",
|
||||
@@ -836,7 +842,7 @@ var retryErrorCodes = []int{
|
||||
func (f *Fs) shouldRetry(err error) (bool, error) {
|
||||
// If this is an awserr object, try and extract more useful information to determine if we should retry
|
||||
if awsError, ok := err.(awserr.Error); ok {
|
||||
// Simple case, check the original embedded error in case it's generically retriable
|
||||
// Simple case, check the original embedded error in case it's generically retryable
|
||||
if fserrors.ShouldRetry(awsError.OrigErr()) {
|
||||
return true, err
|
||||
}
|
||||
|
||||
@@ -195,7 +195,7 @@ type Options struct {
|
||||
StorageURL string `config:"storage_url"`
|
||||
AuthToken string `config:"auth_token"`
|
||||
AuthVersion int `config:"auth_version"`
|
||||
ApplicationCredentialId string `config:"application_credential_id"`
|
||||
ApplicationCredentialID string `config:"application_credential_id"`
|
||||
ApplicationCredentialName string `config:"application_credential_name"`
|
||||
ApplicationCredentialSecret string `config:"application_credential_secret"`
|
||||
StoragePolicy string `config:"storage_policy"`
|
||||
@@ -317,7 +317,7 @@ func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
|
||||
StorageUrl: opt.StorageURL,
|
||||
AuthToken: opt.AuthToken,
|
||||
AuthVersion: opt.AuthVersion,
|
||||
ApplicationCredentialId: opt.ApplicationCredentialId,
|
||||
ApplicationCredentialId: opt.ApplicationCredentialID,
|
||||
ApplicationCredentialName: opt.ApplicationCredentialName,
|
||||
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
|
||||
EndpointType: swift.EndpointType(opt.EndpointType),
|
||||
@@ -430,7 +430,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// NewFs contstructs an Fs from the path, container:path
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
|
||||
@@ -177,8 +177,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
// At least one value will be written to the channel,
|
||||
// specifying the initial value and updated values might
|
||||
// follow. A 0 Duration should pause the polling.
|
||||
// The ChangeNotify implemantion must empty the channel
|
||||
// regulary. When the channel gets closed, the implemantion
|
||||
// The ChangeNotify implementation must empty the channel
|
||||
// regularly. When the channel gets closed, the implementation
|
||||
// should stop polling and release resources.
|
||||
func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
var remoteChans []chan time.Duration
|
||||
|
||||
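A hedged sketch of a poll loop that satisfies the contract described in the comment above: a zero duration pauses polling, a new duration restarts the ticker, and a closed channel stops it. The function name is illustrative and it needs the time package.

// pollLoop consumes the interval channel as documented above.
func pollLoop(ch <-chan time.Duration, poll func()) {
	var ticker *time.Ticker
	var tick <-chan time.Time
	for {
		select {
		case d, ok := <-ch:
			if ticker != nil {
				ticker.Stop()
				ticker, tick = nil, nil
			}
			if !ok {
				return // channel closed: stop polling and release the ticker
			}
			if d > 0 {
				ticker = time.NewTicker(d)
				tick = ticker.C
			}
			// d == 0 leaves tick nil, which pauses polling
		case <-tick:
			poll()
		}
	}
}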
@@ -66,12 +66,13 @@ type Response struct {
|
||||
// Note that status collects all the status values for which we just
|
||||
// check the first is OK.
|
||||
type Prop struct {
|
||||
Status []string `xml:"DAV: status"`
|
||||
Name string `xml:"DAV: prop>displayname,omitempty"`
|
||||
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
|
||||
Status []string `xml:"DAV: status"`
|
||||
Name string `xml:"DAV: prop>displayname,omitempty"`
|
||||
Type *xml.Name `xml:"DAV: prop>resourcetype>collection,omitempty"`
|
||||
IsCollection *string `xml:"DAV: prop>iscollection,omitempty"` // this is a Microsoft extension see #2716
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
|
||||
}
|
||||
|
||||
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
|
||||
@@ -123,7 +124,7 @@ type PropValue struct {
|
||||
Value string `xml:",chardata"`
|
||||
}
|
||||
|
||||
// Error is used to desribe webdav errors
|
||||
// Error is used to describe webdav errors
|
||||
//
|
||||
// <d:error xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns">
|
||||
// <s:exception>Sabre\DAV\Exception\NotFound</s:exception>
|
||||
@@ -136,7 +137,7 @@ type Error struct {
|
||||
StatusCode int
|
||||
}
|
||||
|
||||
// Error returns a string for the error and statistifes the error interface
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
var out []string
|
||||
if e.Message != "" {
|
||||
|
||||
@@ -102,7 +102,7 @@ func (ca *CookieAuth) Cookies() (*CookieResponse, error) {
|
||||
func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error) {
|
||||
spRoot, err := url.Parse(ca.endpoint)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error while contructing endpoint URL")
|
||||
return nil, errors.Wrap(err, "Error while constructing endpoint URL")
|
||||
}
|
||||
|
||||
u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
|
||||
@@ -121,7 +121,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error
|
||||
Jar: jar,
|
||||
}
|
||||
|
||||
// Send the previously aquired Token as a Post parameter
|
||||
// Send the previously acquired Token as a Post parameter
|
||||
if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Succ.Token)); err != nil {
|
||||
return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v")
|
||||
}
|
||||
|
||||
@@ -2,13 +2,10 @@ package odrvcookie
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// CookieRenew holds information for the renew
|
||||
type CookieRenew struct {
|
||||
srv *rest.Client
|
||||
timer *time.Ticker
|
||||
renewFn func()
|
||||
}
|
||||
|
||||
@@ -172,6 +172,18 @@ func itemIsDir(item *api.Response) bool {
|
||||
}
|
||||
fs.Debugf(nil, "Unknown resource type %q/%q on %q", t.Space, t.Local, item.Props.Name)
|
||||
}
|
||||
// the iscollection prop is a Microsoft extension, but if present it is a reliable indicator
|
||||
// if the above check failed - see #2716. This can be an integer or a boolean - see #2964
|
||||
if t := item.Props.IsCollection; t != nil {
|
||||
switch x := strings.ToLower(*t); x {
|
||||
case "0", "false":
|
||||
return false
|
||||
case "1", "true":
|
||||
return true
|
||||
default:
|
||||
fs.Debugf(nil, "Unknown value %q for IsCollection", x)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -244,7 +256,7 @@ func errorHandler(resp *http.Response) error {
|
||||
return errResponse
|
||||
}
|
||||
|
||||
// addShlash makes sure s is terminated with a / if non empty
|
||||
// addSlash makes sure s is terminated with a / if non empty
|
||||
func addSlash(s string) string {
|
||||
if s != "" && !strings.HasSuffix(s, "/") {
|
||||
s += "/"
|
||||
@@ -644,7 +656,8 @@ func (f *Fs) mkdir(dirPath string) error {
|
||||
err := f._mkdir(dirPath)
|
||||
if apiErr, ok := err.(*api.Error); ok {
|
||||
// already exists
|
||||
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable {
|
||||
// owncloud returns 423/StatusLocked if the create is already in progress
|
||||
if apiErr.StatusCode == http.StatusMethodNotAllowed || apiErr.StatusCode == http.StatusNotAcceptable || apiErr.StatusCode == http.StatusLocked {
|
||||
return nil
|
||||
}
|
||||
// parent does not exist
|
||||
|
||||
@@ -56,7 +56,7 @@ type AsyncInfo struct {
|
||||
Templated bool `json:"templated"`
|
||||
}
|
||||
|
||||
// AsyncStatus is returned when requesting the status of an async operations. Possble values in-progress, success, failure
|
||||
// AsyncStatus is returned when requesting the status of an async operations. Possible values in-progress, success, failure
|
||||
type AsyncStatus struct {
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
@@ -307,7 +307,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error parsing time in directory item")
|
||||
}
|
||||
d := fs.NewDir(remote, t).SetSize(int64(object.Size))
|
||||
d := fs.NewDir(remote, t).SetSize(object.Size)
|
||||
return d, nil
|
||||
case "file":
|
||||
o, err := f.newObjectWithInfo(remote, object)
|
||||
@@ -634,7 +634,7 @@ func (f *Fs) Purge() error {
|
||||
return f.purgeCheck("", false)
|
||||
}
|
||||
|
||||
// copyOrMoves copys or moves directories or files depending on the mthod parameter
|
||||
// copyOrMoves copies or moves directories or files depending on the method parameter
|
||||
func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
@@ -1107,7 +1107,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
return err
|
||||
}
|
||||
|
||||
//if file uploaded sucessfully then return metadata
|
||||
//if file uploaded successfully then return metadata
|
||||
o.modTime = modTime
|
||||
o.md5sum = "" // according to unit tests after put the md5 is empty.
|
||||
o.size = int64(in1.BytesRead()) // better solution o.readMetaData() ?
|
||||
|
||||
@@ -8,6 +8,8 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
@@ -19,6 +21,7 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -31,7 +34,13 @@ var (
|
||||
extract = flag.String("extract", "", "Extract the named executable from the .tar.gz and install into bindir.")
|
||||
bindir = flag.String("bindir", defaultBinDir(), "Directory to install files downloaded with -extract.")
|
||||
// Globals
|
||||
matchProject = regexp.MustCompile(`^(\w+)/(\w+)$`)
|
||||
matchProject = regexp.MustCompile(`^([\w-]+)/([\w-]+)$`)
|
||||
osAliases = map[string][]string{
|
||||
"darwin": []string{"macos", "osx"},
|
||||
}
|
||||
archAliases = map[string][]string{
|
||||
"amd64": []string{"x86_64"},
|
||||
}
|
||||
)
|
||||
|
||||
// A github release
|
||||
@@ -113,25 +122,41 @@ func writable(path string) bool {
|
||||
|
||||
// Directory to install releases in by default
|
||||
//
|
||||
// Find writable directories on $PATH. Use the first writable
|
||||
// directory which is in $HOME or failing that the first writable
|
||||
// directory.
|
||||
// Find writable directories on $PATH. Use $GOPATH/bin if that is on
|
||||
// the path and writable or use the first writable directory which is
|
||||
// in $HOME or failing that the first writable directory.
|
||||
//
|
||||
// Returns "" if none of the above were found
|
||||
func defaultBinDir() string {
|
||||
home := os.Getenv("HOME")
|
||||
var binDir string
|
||||
var (
|
||||
bin string
|
||||
homeBin string
|
||||
goHomeBin string
|
||||
gopath = os.Getenv("GOPATH")
|
||||
)
|
||||
for _, dir := range strings.Split(os.Getenv("PATH"), ":") {
|
||||
if writable(dir) {
|
||||
if strings.HasPrefix(dir, home) {
|
||||
return dir
|
||||
if homeBin != "" {
|
||||
homeBin = dir
|
||||
}
|
||||
if gopath != "" && strings.HasPrefix(dir, gopath) && goHomeBin == "" {
|
||||
goHomeBin = dir
|
||||
}
|
||||
}
|
||||
if binDir != "" {
|
||||
binDir = dir
|
||||
if bin == "" {
|
||||
bin = dir
|
||||
}
|
||||
}
|
||||
}
|
||||
return binDir
|
||||
if goHomeBin != "" {
|
||||
return goHomeBin
|
||||
}
|
||||
if homeBin != "" {
|
||||
return homeBin
|
||||
}
|
||||
return bin
|
||||
}
|
||||
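Because the old and new lines are interleaved above, the selection order is easier to see restated on its own. A sketch of the priority the comment describes (this paraphrases the intent, it is not the literal code from the commit):

// pickBinDir returns the preferred install directory out of the writable
// candidates found on $PATH: a writable $GOPATH dir first, then a writable
// dir under $HOME, then any writable dir, else "".
func pickBinDir(goHomeBin, homeBin, bin string) string {
	if goHomeBin != "" {
		return goHomeBin
	}
	if homeBin != "" {
		return homeBin
	}
	return bin
}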
|
||||
// read the body or an error message
|
||||
@@ -175,7 +200,8 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
|
||||
}
|
||||
|
||||
for _, asset := range release.Assets {
|
||||
if matchName.MatchString(asset.Name) {
|
||||
//log.Printf("Finding %s", asset.Name)
|
||||
if matchName.MatchString(asset.Name) && isOurOsArch(asset.Name) {
|
||||
return asset.BrowserDownloadURL, asset.Name
|
||||
}
|
||||
}
|
||||
@@ -183,6 +209,22 @@ func getAsset(project string, matchName *regexp.Regexp) (string, string) {
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// isOurOsArch returns true if s contains our OS and our Arch
|
||||
func isOurOsArch(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
check := func(base string, aliases map[string][]string) bool {
|
||||
names := []string{base}
|
||||
names = append(names, aliases[base]...)
|
||||
for _, name := range names {
|
||||
if strings.Contains(s, name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return check(runtime.GOARCH, archAliases) && check(runtime.GOOS, osAliases)
|
||||
}
|
||||
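For instance, on a darwin/amd64 build the alias tables let an asset named for "osx" and "x86_64" still match (the file name below is hypothetical):

// hypothetical asset name; both checks pass through the "osx" and
// "x86_64" alias entries on darwin/amd64
ok := isOurOsArch("sometool-v1.0-osx-x86_64.tar.gz")
fmt.Println(ok) // true on darwin/amd64, false elsewhere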
|
||||
// get a file for download
|
||||
func getFile(url, fileName string) {
|
||||
log.Printf("Downloading %q from %q", fileName, url)
|
||||
@@ -229,6 +271,66 @@ func run(args ...string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Untars fileName from srcFile
|
||||
func untar(srcFile, fileName, extractDir string) {
|
||||
f, err := os.Open(srcFile)
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't open tar: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
err := f.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't close tar: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
var in io.Reader = f
|
||||
|
||||
srcExt := filepath.Ext(srcFile)
|
||||
if srcExt == ".gz" || srcExt == ".tgz" {
|
||||
gzf, err := gzip.NewReader(f)
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't open gzip: %v", err)
|
||||
}
|
||||
in = gzf
|
||||
}
|
||||
|
||||
tarReader := tar.NewReader(in)
|
||||
|
||||
for {
|
||||
header, err := tarReader.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Trouble reading tar file: %v", err)
|
||||
}
|
||||
name := header.Name
|
||||
switch header.Typeflag {
|
||||
case tar.TypeReg:
|
||||
baseName := filepath.Base(name)
|
||||
if baseName == fileName {
|
||||
outPath := filepath.Join(extractDir, fileName)
|
||||
out, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't open output file: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
err := out.Close()
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't close output: %v", err)
|
||||
}
|
||||
}()
|
||||
n, err := io.Copy(out, tarReader)
|
||||
if err != nil {
|
||||
log.Fatalf("Couldn't write output file: %v", err)
|
||||
}
|
||||
log.Printf("Wrote %s (%d bytes) as %q", fileName, n, outPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||

func main() {
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
@@ -257,8 +359,6 @@ func main() {
|
||||
log.Fatalf("Need to set -bindir")
|
||||
}
|
||||
log.Printf("Unpacking %s from %s and installing into %s", *extract, fileName, *bindir)
|
||||
run("tar", "xf", fileName, *extract)
|
||||
run("chmod", "a+x", *extract)
|
||||
run("mv", "-f", *extract, *bindir+"/")
|
||||
untar(fileName, *extract, *bindir+"/")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ github-release release \
|
||||
--name "rclone" \
|
||||
--description "Rclone - rsync for cloud storage. Sync files to and from many cloud storage providers."
|
||||
|
||||
for build in `ls build | grep -v current`; do
|
||||
for build in `ls build | grep -v current | grep -v testbuilds`; do
|
||||
echo "Uploading ${build}"
|
||||
base="${build%.*}"
|
||||
parts=(${base//-/ })
|
||||
|
||||
@@ -456,7 +456,7 @@ func AddBackendFlags() {
|
||||
help = help[:nl]
|
||||
}
|
||||
help = strings.TrimSpace(help)
|
||||
flag := pflag.CommandLine.VarPF(opt, name, string(opt.ShortOpt), help)
|
||||
flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
|
||||
if _, isBool := opt.Default.(bool); isBool {
|
||||
flag.NoOptDefVal = "true"
|
||||
}
|
||||
|
||||
39 cmd/help.go
@@ -32,8 +32,47 @@ documentation, changelog and configuration walkthroughs.
|
||||
fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
|
||||
atexit.Run()
|
||||
},
|
||||
BashCompletionFunction: bashCompletionFunc,
|
||||
}
|
||||
|
||||
const (
|
||||
bashCompletionFunc = `
|
||||
__rclone_custom_func() {
|
||||
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
|
||||
local cur cword prev words
|
||||
if declare -F _init_completion > /dev/null; then
|
||||
_init_completion -n : || return
|
||||
else
|
||||
__rclone_init_completion -n : || return
|
||||
fi
|
||||
if [[ $cur =~ ^[[:alnum:]]*$ ]]; then
|
||||
local remote
|
||||
while IFS= read -r remote; do
|
||||
[[ $remote != $cur* ]] || COMPREPLY+=("$remote")
|
||||
done < <(command rclone listremotes)
|
||||
if [[ ${COMPREPLY[@]} ]]; then
|
||||
local paths=("$cur"*)
|
||||
[[ ! -f ${paths[0]} ]] || COMPREPLY+=("${paths[@]}")
|
||||
fi
|
||||
elif [[ $cur =~ ^[[:alnum:]]+: ]]; then
|
||||
local path=${cur#*:}
|
||||
if [[ $path == */* ]]; then
|
||||
local prefix=${path%/*}
|
||||
else
|
||||
local prefix=
|
||||
fi
|
||||
local line
|
||||
while IFS= read -r line; do
|
||||
local reply=${prefix:+$prefix/}$line
|
||||
[[ $reply != $path* ]] || COMPREPLY+=("$reply")
|
||||
done < <(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null)
|
||||
fi
|
||||
[[ ! ${COMPREPLY[@]} ]] || compopt -o nospace
|
||||
fi
|
||||
}
|
||||
`
|
||||
)
|
||||
|
||||
// root help command
|
||||
var helpCommand = &cobra.Command{
|
||||
Use: "help",
|
||||
|
||||
123 cmd/info/info.go
@@ -21,11 +21,22 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type position int
|
||||
|
||||
const (
|
||||
positionMiddle position = 1 << iota
|
||||
positionLeft
|
||||
positionRight
|
||||
positionNone position = 0
|
||||
positionAll position = positionRight<<1 - 1
|
||||
)
|
||||
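Since the constants above come out as positionMiddle=1, positionLeft=2 and positionRight=4, positionAll is positionRight<<1 - 1 = 7, i.e. all three bits set. A tiny illustration of combining and testing the flags (values as defined above):

p := positionLeft | positionRight
fmt.Println(p)                                                        // "left,right", via the String method added further down in this diff
fmt.Println(p&positionMiddle != 0)                                    // false
fmt.Println(positionAll == positionMiddle|positionLeft|positionRight) // true: 1|2|4 == 7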
|
||||
var (
|
||||
checkNormalization bool
|
||||
checkControl bool
|
||||
checkLength bool
|
||||
checkStreaming bool
|
||||
positionList = []position{positionMiddle, positionLeft, positionRight}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -59,7 +70,7 @@ a bit of go code for each one.
|
||||
type results struct {
|
||||
f fs.Fs
|
||||
mu sync.Mutex
|
||||
charNeedsEscaping map[rune]bool
|
||||
stringNeedsEscaping map[string]position
|
||||
maxFileLength int
|
||||
canWriteUnnormalized bool
|
||||
canReadUnnormalized bool
|
||||
@@ -69,8 +80,8 @@ type results struct {
|
||||
|
||||
func newResults(f fs.Fs) *results {
|
||||
return &results{
|
||||
f: f,
|
||||
charNeedsEscaping: make(map[rune]bool),
|
||||
f: f,
|
||||
stringNeedsEscaping: make(map[string]position),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,13 +90,13 @@ func (r *results) Print() {
|
||||
fmt.Printf("// %s\n", r.f.Name())
|
||||
if checkControl {
|
||||
escape := []string{}
|
||||
for c, needsEscape := range r.charNeedsEscaping {
|
||||
if needsEscape {
|
||||
for c, needsEscape := range r.stringNeedsEscaping {
|
||||
if needsEscape != positionNone {
|
||||
escape = append(escape, fmt.Sprintf("0x%02X", c))
|
||||
}
|
||||
}
|
||||
sort.Strings(escape)
|
||||
fmt.Printf("charNeedsEscaping = []byte{\n")
|
||||
fmt.Printf("stringNeedsEscaping = []byte{\n")
|
||||
fmt.Printf("\t%s\n", strings.Join(escape, ", "))
|
||||
fmt.Printf("}\n")
|
||||
}
|
||||
@@ -130,20 +141,45 @@ func (r *results) checkUTF8Normalization() {
|
||||
}
|
||||
}
|
||||
|
||||
// check we can write file with the rune passed in
|
||||
func (r *results) checkChar(c rune) {
|
||||
fs.Infof(r.f, "Writing file 0x%02X", c)
|
||||
path := fmt.Sprintf("0x%02X-%c-", c, c)
|
||||
_, err := r.writeFile(path)
|
||||
escape := false
|
||||
if err != nil {
|
||||
fs.Infof(r.f, "Couldn't write file 0x%02X", c)
|
||||
escape = true
|
||||
} else {
|
||||
fs.Infof(r.f, "OK writing file 0x%02X", c)
|
||||
func (r *results) checkStringPositions(s string) {
|
||||
fs.Infof(r.f, "Writing position file 0x%0X", s)
|
||||
positionError := positionNone
|
||||
|
||||
for _, pos := range positionList {
|
||||
path := ""
|
||||
switch pos {
|
||||
case positionMiddle:
|
||||
path = fmt.Sprintf("position-middle-%0X-%s-", s, s)
|
||||
case positionLeft:
|
||||
path = fmt.Sprintf("%s-position-left-%0X", s, s)
|
||||
case positionRight:
|
||||
path = fmt.Sprintf("position-right-%0X-%s", s, s)
|
||||
default:
|
||||
panic("invalid position: " + pos.String())
|
||||
}
|
||||
_, writeErr := r.writeFile(path)
|
||||
if writeErr != nil {
|
||||
fs.Infof(r.f, "Writing %s position file 0x%0X Error: %s", pos.String(), s, writeErr)
|
||||
} else {
|
||||
fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
|
||||
}
|
||||
obj, getErr := r.f.NewObject(path)
|
||||
if getErr != nil {
|
||||
fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
|
||||
} else {
|
||||
if obj.Size() != 50 {
|
||||
fs.Infof(r.f, "Getting %s position file 0x%0X Invalid Size: %d", pos.String(), s, obj.Size())
|
||||
} else {
|
||||
fs.Infof(r.f, "Getting %s position file 0x%0X OK", pos.String(), s)
|
||||
}
|
||||
}
|
||||
if writeErr != nil || getErr != nil {
|
||||
positionError += pos
|
||||
}
|
||||
}
|
||||
|
||||
r.mu.Lock()
|
||||
r.charNeedsEscaping[c] = escape
|
||||
r.stringNeedsEscaping[s] = positionError
|
||||
r.mu.Unlock()
|
||||
}
|
||||
|
||||
@@ -157,19 +193,28 @@ func (r *results) checkControls() {
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
for i := rune(0); i < 128; i++ {
|
||||
s := string(i)
|
||||
if i == 0 || i == '/' {
|
||||
// We're not even going to check NULL or /
|
||||
r.charNeedsEscaping[i] = true
|
||||
r.stringNeedsEscaping[s] = positionAll
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
c := i
|
||||
go func() {
|
||||
go func(s string) {
|
||||
defer wg.Done()
|
||||
token := <-tokens
|
||||
r.checkChar(c)
|
||||
r.checkStringPositions(s)
|
||||
tokens <- token
|
||||
}()
|
||||
}(s)
|
||||
}
|
||||
for _, s := range []string{"＼", "\xBF", "\xFE"} {
|
||||
wg.Add(1)
|
||||
go func(s string) {
|
||||
defer wg.Done()
|
||||
token := <-tokens
|
||||
r.checkStringPositions(s)
|
||||
tokens <- token
|
||||
}(s)
|
||||
}
|
||||
wg.Wait()
|
||||
fs.Infof(r.f, "Done trying to create control character file names")
|
||||
@@ -268,3 +313,35 @@ func readInfo(f fs.Fs) error {
|
||||
r.Print()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e position) String() string {
|
||||
switch e {
|
||||
case positionNone:
|
||||
return "none"
|
||||
case positionAll:
|
||||
return "all"
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if e&positionMiddle != 0 {
|
||||
buf.WriteString("middle")
|
||||
e &= ^positionMiddle
|
||||
}
|
||||
if e&positionLeft != 0 {
|
||||
if buf.Len() != 0 {
|
||||
buf.WriteRune(',')
|
||||
}
|
||||
buf.WriteString("left")
|
||||
e &= ^positionLeft
|
||||
}
|
||||
if e&positionRight != 0 {
|
||||
if buf.Len() != 0 {
|
||||
buf.WriteRune(',')
|
||||
}
|
||||
buf.WriteString("right")
|
||||
e &= ^positionRight
|
||||
}
|
||||
if e != positionNone {
|
||||
panic("invalid position")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
40 cmd/info/process.sh (new file)
@@ -0,0 +1,40 @@
set -euo pipefail

for f in info-*.log; do
    for pos in middle left right; do
        egrep -oe " Writing $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.write_$pos
        egrep -oe " Getting $pos position file [^ ]* \w+" $f | sort | cut -d' ' -f 7 > $f.get_$pos
    done
    {
        echo "${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}\t${${f%.log}#info-}"
        echo "Write\tWrite\tWrite\tGet\tGet\tGet"
        echo "Mid\tLeft\tRight\tMid\tLeft\tRight"
        paste $f.write_{middle,left,right} $f.get_{middle,left,right}
    } > $f.csv
done

for f in info-*.list; do
    for pos in middle left right; do
        cat $f | perl -lne 'print $1 if /^\s+[0-9]+\s+(.*)/' | grep -a "position-$pos-" | sort > $f.$pos
    done
    {
        echo "${${f%.list}#info-}\t${${f%.list}#info-}\t${${f%.list}#info-}"
        echo "List\tList\tList"
        echo "Mid\tLeft\tRight"
        for e in 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E 1F 20 21 22 23 24 25 26 27 28 29 2A 2B 2C 2D 2E 30 31 32 33 34 35 36 37 38 39 3A 3B 3C 3D 3E 3F 40 41 42 43 44 45 46 47 48 49 4A 4B 4C 4D 4E 4F 50 51 52 53 54 55 56 57 58 59 5A 5B 5C 5D 5E 5F 60 61 62 63 64 65 66 67 68 69 6A 6B 6C 6D 6E 6F 70 71 72 73 74 75 76 77 78 79 7A 7B 7C 7D 7E 7F BF EFBCBC FE; do
            echo -n $(perl -lne 'print "'$e'-$1" if /^position-middle-'$e'-(.*)-/' $f.middle | tr -d "\t\r" | grep -a . || echo Miss)
            echo -n "\t"
            echo -n $(perl -lne 'print "'$e'-$1" if /^(.*)-position-left-'$e'/' $f.left | tr -d "\t\r" | grep -a . || echo Miss)
            echo -n "\t"
            echo $(perl -lne 'print "'$e'-$1" if /^position-right-'$e'-(.*)/' $f.right | tr -d "\t\r" | grep -a . || echo Miss)
            # echo -n $(grep -a "position-middle-$e-" $f.middle | tr -d "\t\r" || echo Miss)"\t"
            # echo -n $(grep -a "position-left-$e" $f.left | tr -d "\t\r" || echo Miss)"\t"
            # echo $(grep -a "position-right-$e-" $f.right | tr -d "\t\r" || echo Miss)
        done
    } > $f.csv
done

for f in info-*.list; do
    paste ${f%.list}.log.csv $f.csv > ${f%.list}.full.csv
done
paste *.full.csv > info-complete.csv
3
cmd/info/test.cmd
Normal file
@@ -0,0 +1,3 @@
rclone.exe purge info
rclone.exe info -vv info > info-LocalWindows.log 2>&1
rclone.exe ls -vv info > info-LocalWindows.list 2>&1
43
cmd/info/test.sh
Executable file
@@ -0,0 +1,43 @@
#!/usr/bin/env zsh
#
# example usage:
# $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh --list | \
#     parallel -P20 $GOPATH/src/github.com/ncw/rclone/cmd/info/test.sh

export PATH=$GOPATH/src/github.com/ncw/rclone:$PATH

typeset -A allRemotes
allRemotes=(
    TestAmazonCloudDrive '--low-level-retries=2 --checkers=5'
    TestB2               ''
    TestBox              ''
    TestDrive            '--tpslimit=5'
    TestCrypt            ''
    TestDropbox          '--checkers=1'
    TestJottacloud       ''
    TestMega             ''
    TestOneDrive         ''
    TestOpenDrive        '--low-level-retries=2 --checkers=5'
    TestPcloud           '--low-level-retries=2 --timeout=15s'
    TestS3               ''
    Local                ''
)

set -euo pipefail

if [[ $# -eq 0 ]]; then
    set -- ${(k)allRemotes[@]}
elif [[ $1 = --list ]]; then
    printf '%s\n' ${(k)allRemotes[@]}
    exit 0
fi

for remote; do
    dir=$remote:infotest
    if [[ $remote = Local ]]; then
        dir=infotest
    fi
    rclone purge $dir || :
    rclone info -vv $dir ${=allRemotes[$remote]} &> info-$remote.log
    rclone ls -vv $dir &> info-$remote.list
done
@@ -16,7 +16,7 @@ var (

func init() {
    cmd.Root.AddCommand(commandDefintion)
    commandDefintion.Flags().BoolVarP(&listLong, "long", "l", listLong, "Show the type as well as names.")
    commandDefintion.Flags().BoolVarP(&listLong, "long", "", listLong, "Show the type as well as names.")
}

var commandDefintion = &cobra.Command{
@@ -60,7 +60,13 @@ If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"
will be "subfolder/file.txt", not "remote:path/subfolder/file.txt".
When used without --recursive the Path will always be the same as Name.

The time is in RFC3339 format with nanosecond precision.
The time is in RFC3339 format with up to nanosecond precision. The
number of decimal digits in the seconds will depend on the precision
that the remote can hold the times, so if times are accurate to the
nearest millisecond (eg Google Drive) then 3 digits will always be
shown ("2017-05-31T16:15:57.034+01:00") whereas if the times are
accurate to the nearest second (Dropbox, Box, WebDav etc) no digits
will be shown ("2017-05-31T16:15:57+01:00").

The whole output can be processed as a JSON blob, or alternatively it
can be processed line by line as each item is written one to a line.
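The variable number of fractional digits described above matches the behaviour you get from Go's `time` package when the fractional seconds in a layout are written with `9`s: trailing zeros, and the decimal point itself if nothing remains, are dropped. A minimal illustration, using made-up times (rclone may construct the exact layout differently):

```
package main

import (
	"fmt"
	"time"
)

func main() {
	// "999999999" style fractional seconds omit trailing zeros, so a value
	// accurate to the millisecond prints three digits and a value accurate
	// to the second prints none.
	const layout = "2006-01-02T15:04:05.999999999Z07:00"
	bst := time.FixedZone("BST", 3600)
	ms := time.Date(2017, 5, 31, 16, 15, 57, 34e6, bst)
	s := time.Date(2017, 5, 31, 16, 15, 57, 0, bst)
	fmt.Println(ms.Format(layout)) // 2017-05-31T16:15:57.034+01:00
	fmt.Println(s.Format(layout))  // 2017-05-31T16:15:57+01:00
}
```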
@@ -45,7 +45,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
    if err != nil {
        return translateError(err)
    }
    resp.Size = int(n)
    resp.Size = n
    return nil
}

@@ -20,12 +20,12 @@ var (
)

func randomSeekTest(size int64, in *os.File, name string) {
    startTime := time.Now()
    start := rand.Int63n(size)
    blockSize := rand.Intn(*maxBlockSize)
    if int64(blockSize) > size-start {
        blockSize = int(size - start)
    }
    log.Printf("Reading %d from %d", blockSize, start)

    _, err := in.Seek(start, io.SeekStart)
    if err != nil {
@@ -37,6 +37,8 @@ func randomSeekTest(size int64, in *os.File, name string) {
    if err != nil {
        log.Fatalf("Read failed on %q: %v", name, err)
    }

    log.Printf("Reading %d from %d took %v ", blockSize, start, time.Since(startTime))
}

func main() {
@@ -48,10 +50,12 @@ func main() {
    rand.Seed(*randSeed)

    name := args[0]
    openStart := time.Now()
    in, err := os.Open(name)
    if err != nil {
        log.Fatalf("Couldn't open %q: %v", name, err)
    }
    log.Printf("File Open took %v", time.Since(openStart))

    fi, err := in.Stat()
    if err != nil {
@@ -158,7 +158,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
        }, nil
    case "Browse":
        var browse browse
        if err := xml.Unmarshal([]byte(argsXML), &browse); err != nil {
        if err := xml.Unmarshal(argsXML, &browse); err != nil {
            return nil, err
        }
        obj, err := cds.objectFromID(browse.ObjectID)
@@ -179,7 +179,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
        }
        return
    }():]
    if browse.RequestedCount != 0 && int(browse.RequestedCount) < len(objs) {
    if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) {
        objs = objs[:browse.RequestedCount]
    }
    result, err := xml.Marshal(objs)
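As context for the first hunk, `xml.Unmarshal` already accepts a `[]byte`, which is why the `[]byte(argsXML)` conversion was redundant. A tiny self-contained sketch of unmarshalling Browse-style arguments; the `browseArgs` struct and the sample XML here are illustrative stand-ins, not the real dlna types.

```
package main

import (
	"encoding/xml"
	"fmt"
	"log"
)

// browseArgs is an illustrative stand-in for the SOAP Browse arguments.
type browseArgs struct {
	ObjectID       string `xml:"ObjectID"`
	StartingIndex  int    `xml:"StartingIndex"`
	RequestedCount int    `xml:"RequestedCount"`
}

func main() {
	argsXML := []byte(`<Browse><ObjectID>0</ObjectID><StartingIndex>0</StartingIndex><RequestedCount>10</RequestedCount></Browse>`)
	var b browseArgs
	// argsXML is already a []byte, so no conversion is needed.
	if err := xml.Unmarshal(argsXML, &b); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", b)
}
```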
@@ -233,3 +233,7 @@ Contributors
* kayrus <kay.diam@gmail.com>
* Rémy Léone <remy.leone@gmail.com>
* Wojciech Smigielski <wojciech.hieronim.smigielski@gmail.com>
* weetmuts <oehrstroem@gmail.com>
* Jonathan <vanillajonathan@users.noreply.github.com>
* James Carpenter <orbsmiv@users.noreply.github.com>
* Vince <vince0villamora@gmail.com>
@@ -16,9 +16,11 @@ Here is an example of making a b2 configuration. First run

    rclone config

This will guide you through an interactive setup process. You will
need your account number (a short hex number) and key (a long hex
number) which you can get from the b2 control panel.
This will guide you through an interactive setup process. To authenticate
you will either need your Account ID (a short hex number) and Master
Application Key (a long hex number) OR an Application Key, which is the
recommended method. See below for further details on generating and using
an Application Key.

```
No remotes found - make a new one
@@ -102,10 +104,10 @@ You can use these with rclone too; you will need to use rclone version 1.43
or later.

Follow Backblaze's docs to create an Application Key with the required
permission and add the `Application Key ID` as the `account` and the
permission and add the `applicationKeyId` as the `account` and the
`Application Key` itself as the `key`.

Note that you must put the Application Key ID as the `account` - you
Note that you must put the _applicationKeyId_ as the `account` – you
can't use the master Account ID. If you try then B2 will return 401
errors.

@@ -391,12 +393,21 @@ Upload chunk size. Must fit in memory.
When uploading large files, chunk the file into this size. Note that
these chunks are buffered in memory and there might be a maximum of
"--transfers" chunks in progress at once. 5,000,000 Bytes is the
minimim size.
minimum size.

- Config: chunk_size
- Env Var: RCLONE_B2_CHUNK_SIZE
- Type: SizeSuffix
- Default: 96M

#### --b2-disable-checksum

Disable checksums for large (> upload cutoff) files

- Config: disable_checksum
- Env Var: RCLONE_B2_DISABLE_CHECKSUM
- Type: bool
- Default: false

<!--- autogenerated options stop -->

@@ -112,6 +112,17 @@ To copy a local directory to a Box directory called backup

    rclone copy /home/source remote:backup

### Using rclone with an Enterprise account with SSO ###

If you have an "Enterprise" account type with Box with single sign on
(SSO), you need to create a password to use Box with rclone. This can
be done at your Enterprise Box account by going to Settings, "Account"
Tab, and then set the password in the "Authentication" field.

Once you have done this, you can set up your Enterprise Box account
using the same procedure detailed above, using the password you
have just set.

### Invalid refresh token ###

According to the [box docs](https://developer.box.com/v2.0/docs/oauth-20#section-6-using-the-access-and-refresh-tokens):

@@ -1,11 +1,140 @@
|
||||
---
|
||||
title: "Documentation"
|
||||
description: "Rclone Changelog"
|
||||
date: "2018-11-24"
|
||||
date: "2019-02-09"
|
||||
---
|
||||
|
||||
# Changelog
|
||||
|
||||
## v1.46 - 2019-02-09
|
||||
|
||||
* New backends
|
||||
* Support Alibaba Cloud (Aliyun) OSS via the s3 backend (Nick Craig-Wood)
|
||||
* New commands
|
||||
* serve dlna: serves a remote via DLNA for the local network (nicolov)
|
||||
* New Features
|
||||
* copy, move: Restore deprecated `--no-traverse` flag (Nick Craig-Wood)
|
||||
* This is useful for when transferring a small number of files into a large destination
|
||||
* genautocomplete: Add remote path completion for bash completion (Christopher Peterson & Danil Semelenov)
|
||||
* Buffer memory handling reworked to return memory to the OS better (Nick Craig-Wood)
|
||||
* Buffer recycling library to replace sync.Pool
|
||||
* Optionally use memory mapped memory for better memory shrinking
|
||||
* Enable with `--use-mmap` if having memory problems - not default yet
|
||||
* Parallelise reading of files specified by `--files-from` (Nick Craig-Wood)
|
||||
* check: Add stats showing total files matched. (Dario Guzik)
|
||||
* Allow rename/delete open files under Windows (Nick Craig-Wood)
|
||||
* lsjson: Use exactly the correct number of decimal places in the seconds (Nick Craig-Wood)
|
||||
* Add cookie support with cmdline switch `--use-cookies` for all HTTP based remotes (qip)
|
||||
* Warn if `--checksum` is set but there are no hashes available (Nick Craig-Wood)
|
||||
* Rework rate limiting (pacer) to be more accurate and allow bursting (Nick Craig-Wood)
|
||||
* Improve error reporting for too many/few arguments in commands (Nick Craig-Wood)
|
||||
* listremotes: Remove `-l` short flag as it conflicts with the new global flag (weetmuts)
|
||||
* Make http serving with auth generate INFO messages on auth fail (Nick Craig-Wood)
|
||||
* Bug Fixes
|
||||
* Fix layout of stats (Nick Craig-Wood)
|
||||
* Fix `--progress` crash under Windows Jenkins (Nick Craig-Wood)
|
||||
* Fix transfer of google/onedrive docs by calling Rcat in Copy when size is -1 (Cnly)
|
||||
* copyurl: Fix checking of `--dry-run` (Denis Skovpen)
|
||||
* Mount
|
||||
* Check that mountpoint and local directory to mount don't overlap (Nick Craig-Wood)
|
||||
* Fix mount size under 32 bit Windows (Nick Craig-Wood)
|
||||
* VFS
|
||||
* Implement renaming of directories for backends without DirMove (Nick Craig-Wood)
|
||||
* now all backends except b2 support renaming directories
|
||||
* Implement `--vfs-cache-max-size` to limit the total size of the cache (Nick Craig-Wood)
|
||||
* Add `--dir-perms` and `--file-perms` flags to set default permissions (Nick Craig-Wood)
|
||||
* Fix deadlock on concurrent operations on a directory (Nick Craig-Wood)
|
||||
* Fix deadlock between RWFileHandle.close and File.Remove (Nick Craig-Wood)
|
||||
* Fix renaming/deleting open files with cache mode "writes" under Windows (Nick Craig-Wood)
|
||||
* Fix panic on rename with `--dry-run` set (Nick Craig-Wood)
|
||||
* Fix vfs/refresh with recurse=true needing the `--fast-list` flag
|
||||
* Local
|
||||
* Add support for `-l`/`--links` (symbolic link translation) (yair@unicorn)
|
||||
* this works by showing links as `link.rclonelink` - see local backend docs for more info
|
||||
* this errors if used with `-L`/`--copy-links`
|
||||
* Fix renaming/deleting open files on Windows (Nick Craig-Wood)
|
||||
* Crypt
|
||||
* Check for maximum length before decrypting filename to fix panic (Garry McNulty)
|
||||
* Azure Blob
|
||||
* Allow building azureblob backend on *BSD (themylogin)
|
||||
* Use the rclone HTTP client to support `--dump headers`, `--tpslimit` etc (Nick Craig-Wood)
|
||||
* Use the s3 pacer for 0 delay in non error conditions (Nick Craig-Wood)
|
||||
* Ignore directory markers (Nick Craig-Wood)
|
||||
* Stop Mkdir attempting to create existing containers (Nick Craig-Wood)
|
||||
* B2
|
||||
* cleanup: will remove unfinished large files >24hrs old (Garry McNulty)
|
||||
* For a bucket limited application key check the bucket name (Nick Craig-Wood)
|
||||
* before this, rclone would use the authorised bucket regardless of what you put on the command line
|
||||
* Added `--b2-disable-checksum` flag (Wojciech Smigielski)
|
||||
* this enables large files to be uploaded without a SHA-1 hash for speed reasons
|
||||
* Drive
|
||||
* Set default pacer to 100ms for 10 tps (Nick Craig-Wood)
|
||||
* This fits the Google defaults much better and reduces the 403 errors massively
|
||||
* Add `--drive-pacer-min-sleep` and `--drive-pacer-burst` to control the pacer
|
||||
* Improve ChangeNotify support for items with multiple parents (Fabian Möller)
|
||||
* Fix ListR for items with multiple parents - this fixes oddities with `vfs/refresh` (Fabian Möller)
|
||||
* Fix using `--drive-impersonate` and appfolders (Nick Craig-Wood)
|
||||
* Fix google docs in rclone mount for some (not all) applications (Nick Craig-Wood)
|
||||
* Dropbox
|
||||
* Retry-After support for Dropbox backend (Mathieu Carbou)
|
||||
* FTP
|
||||
* Wait for 60 seconds for a connection to Close then declare it dead (Nick Craig-Wood)
|
||||
* helps with indefinite hangs on some FTP servers
|
||||
* Google Cloud Storage
|
||||
* Update google cloud storage endpoints (weetmuts)
|
||||
* HTTP
|
||||
* Add an example with username and password which is supported but wasn't documented (Nick Craig-Wood)
|
||||
* Fix backend with `--files-from` and non-existent files (Nick Craig-Wood)
|
||||
* Hubic
|
||||
* Make error message more informative if authentication fails (Nick Craig-Wood)
|
||||
* Jottacloud
|
||||
* Resume and deduplication support (Oliver Heyme)
|
||||
* Use token auth for all API requests, don't store password anymore (Sebastian Bünger)
|
||||
* Add support for 2-factor authentication (Sebastian Bünger)
|
||||
* Mega
|
||||
* Implement v2 account login which fixes logins for newer Mega accounts (Nick Craig-Wood)
|
||||
* Return error if an unknown length file is attempted to be uploaded (Nick Craig-Wood)
|
||||
* Add new error codes for better error reporting (Nick Craig-Wood)
|
||||
* Onedrive
|
||||
* Fix broken support for "shared with me" folders (Alex Chen)
|
||||
* Fix root ID not normalised (Cnly)
|
||||
* Return err instead of panic on unknown-sized uploads (Cnly)
|
||||
* Qingstor
|
||||
* Fix go routine leak on multipart upload errors (Nick Craig-Wood)
|
||||
* Add upload chunk size/concurrency/cutoff control (Nick Craig-Wood)
|
||||
* Default `--qingstor-upload-concurrency` to 1 to work around bug (Nick Craig-Wood)
|
||||
* S3
|
||||
* Implement `--s3-upload-cutoff` for single part uploads below this (Nick Craig-Wood)
|
||||
* Change `--s3-upload-concurrency` default to 4 to increase performance (Nick Craig-Wood)
|
||||
* Add `--s3-bucket-acl` to control bucket ACL (Nick Craig-Wood)
|
||||
* Auto detect region for buckets on operation failure (Nick Craig-Wood)
|
||||
* Add GLACIER storage class (William Cocker)
|
||||
* Add Scaleway to s3 documentation (Rémy Léone)
|
||||
* Add AWS endpoint eu-north-1 (weetmuts)
|
||||
* SFTP
|
||||
* Add support for PEM encrypted private keys (Fabian Möller)
|
||||
* Add option to force the usage of an ssh-agent (Fabian Möller)
|
||||
* Perform environment variable expansion on key-file (Fabian Möller)
|
||||
* Fix rmdir on Windows based servers (eg CrushFTP) (Nick Craig-Wood)
|
||||
* Fix rmdir deleting directory contents on some SFTP servers (Nick Craig-Wood)
|
||||
* Fix error on dangling symlinks (Nick Craig-Wood)
|
||||
* Swift
|
||||
* Add `--swift-no-chunk` to disable segmented uploads in rcat/mount (Nick Craig-Wood)
|
||||
* Introduce application credential auth support (kayrus)
|
||||
* Fix memory usage by slimming Object (Nick Craig-Wood)
|
||||
* Fix extra requests on upload (Nick Craig-Wood)
|
||||
* Fix reauth on big files (Nick Craig-Wood)
|
||||
* Union
|
||||
* Fix poll-interval not working (Nick Craig-Wood)
|
||||
* WebDAV
|
||||
* Support About which means rclone mount will show the correct disk size (Nick Craig-Wood)
|
||||
* Support MD5 and SHA1 hashes with Owncloud and Nextcloud (Nick Craig-Wood)
|
||||
* Fail soft on time parsing errors (Nick Craig-Wood)
|
||||
* Fix infinite loop on failed directory creation (Nick Craig-Wood)
|
||||
* Fix identification of directories for Bitrix Site Manager (Nick Craig-Wood)
|
||||
* Fix upload of 0 length files on some servers (Nick Craig-Wood)
|
||||
* Fix if MKCOL fails with 423 Locked assume the directory exists (Nick Craig-Wood)
|
||||
|
||||
## v1.45 - 2018-11-24
|
||||
|
||||
* New backends
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone"
|
||||
slug: rclone
|
||||
url: /commands/rclone/
|
||||
@@ -26,283 +26,301 @@ rclone [flags]
|
||||
### Options
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
-h, --help help for rclone
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
-V, --version Print the version number
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
-h, --help help for rclone
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
-V, --version Print the version number
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
@@ -355,4 +373,4 @@ rclone [flags]
|
||||
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
|
||||
* [rclone version](/commands/rclone_version/) - Show the version number.
|
||||
|
||||
###### Auto generated by spf13/cobra on 24-Nov-2018
|
||||
###### Auto generated by spf13/cobra on 9-Feb-2019
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone about"
|
||||
slug: rclone_about
|
||||
url: /commands/rclone_about/
|
||||
@@ -69,285 +69,303 @@ rclone about remote: [flags]
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
###### Auto generated by spf13/cobra on 24-Nov-2018
|
||||
###### Auto generated by spf13/cobra on 9-Feb-2019
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone authorize"
|
||||
slug: rclone_authorize
|
||||
url: /commands/rclone_authorize/
|
||||
@@ -28,285 +28,303 @@ rclone authorize [flags]
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.,
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.,
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP bodies - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-pass string Password.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-user string User Name
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer int Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Obsolete - does nothing.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.,
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.,
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP bodies - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```

### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

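For context, `rclone authorize` is normally run on a machine that has a web browser when a remote is being configured on a headless box. A minimal sketch of such a run (the backend name `"drive"` is only an example; substitute the backend being configured):

```
rclone authorize "drive" --verbose
```

The command prints a token, which is then pasted back into the waiting `rclone config` session on the headless machine.
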
@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone cachestats"
slug: rclone_cachestats
url: /commands/rclone_cachestats/
@@ -27,285 +27,303 @@ rclone cachestats source: [flags]
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If objects are greater than this, use the drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping, use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
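
These inherited flags are available on every rclone command. As a rough illustration (the remote names `source:path` and `dest:path` below are placeholders, not real remotes), a bandwidth-limited trial sync combining several of the flags listed above might look like:

```
rclone sync source:path dest:path --dry-run --progress --transfers 8 --bwlimit 4M --log-level INFO
```

Dropping `--dry-run` performs the transfer for real; `-n` and `-P` are the short forms of `--dry-run` and `--progress`.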

### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone cat"
slug: rclone_cat
url: /commands/rclone_cat/
@@ -49,285 +49,303 @@ rclone cat remote:path [flags]

### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If objects are greater than this, use the drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping, use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server URL.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
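As a minimal sketch of how several of the global flags listed above combine in practice (the remote name `remote:photos` and the local path are placeholders, not values taken from this page):

```
# Sketch only: sync with more parallel transfers and checkers, a bandwidth cap,
# recursive listing where the backend supports it, and live progress output.
rclone sync /local/photos remote:photos --transfers 8 --checkers 16 --bwlimit 10M --fast-list --progress
```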

### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone check"
slug: rclone_check
url: /commands/rclone_check/
@@ -43,285 +43,303 @@ rclone check source:path dest:path [flags]
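A minimal sketch of the command from the usage line above combined with two of the inherited flags listed below (`source:path` and `dest:path` are the placeholders from that usage line):

```
# Sketch: compare source and destination by size only, printing one-line stats.
rclone check source:path dest:path --size-only --stats-one-line
```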
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server URL.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server URL.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
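
For example, many of the global flags above can be combined on a single invocation. The source path and remote name in this sketch are placeholders:

```
rclone copy /path/to/src remote:backup --transfers 8 --fast-list --dry-run -v
```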

### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone cleanup"
slug: rclone_cleanup
url: /commands/rclone_cleanup/
@@ -28,285 +28,303 @@ rclone cleanup remote:path [flags]

### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```

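As an illustrative sketch, not part of the generated page: a few of these global flags might be combined on a sync like this, where `/local/photos` and `remote:photos` are placeholder paths:

```
rclone sync /local/photos remote:photos --transfers 8 --checkers 16 --bwlimit 10M --stats 30s
```

`--transfers`, `--checkers`, `--bwlimit` and `--stats` are all listed above with their defaults.
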
### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone config"
slug: rclone_config
url: /commands/rclone_config/
@@ -28,281 +28,299 @@ rclone config [flags]
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```

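As another illustrative sketch, not part of the generated page: these inherited flags apply to `rclone config` like any other command, so a hypothetical per-project configuration file could be selected with the `--config` flag listed above:

```
rclone --config ./project-rclone.conf config
```
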
### SEE ALSO

@@ -318,4 +336,4 @@ rclone config [flags]
* [rclone config show](/commands/rclone_config_show/) - Print (decrypted) config file, or the config for a single remote.
* [rclone config update](/commands/rclone_config_update/) - Update options in an existing remote.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone config create"
slug: rclone_config_create
url: /commands/rclone_config_create/
@@ -19,6 +19,15 @@ you would do:

    rclone config create myremote swift env_auth true

Note that if the config process would normally ask a question the
default is taken. Each time that happens rclone will print a message
saying how to affect the value taken.

So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:

    rclone config create mydrive drive config_is_local false

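A further illustrative sketch, not taken from the generated docs: the `<key> <value>` pairs are the backend option names whose flag equivalents are listed below, so a hypothetical SFTP remote (the host, user and key_file values here are placeholders) could be created non-interactively like this:

```
rclone config create mysftp sftp host sftp.example.com user demo key_file ~/.ssh/id_rsa
```
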
```
rclone config create <name> <type> [<key> <value>]* [flags]

@@ -33,285 +42,303 @@ rclone config create <name> <type> [<key> <value>]* [flags]

### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
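
As a purely illustrative sketch (not part of the generated help above; the `remote:` name and local path are hypothetical), a few of these global flags might be combined like this:

```
# Trial-run a sync with a bandwidth cap and recursive listing, showing progress
rclone sync /home/user/photos remote:photos --bwlimit 1M --fast-list --dry-run -P
```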

### SEE ALSO

* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone config delete"
slug: rclone_config_delete
url: /commands/rclone_config_delete/
@@ -25,285 +25,303 @@ rclone config delete <name> [flags]

### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
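The `--rc*` flags in the listing above configure rclone's remote control server. As a minimal, illustrative sketch of how they combine on the command line (the remote name `remote:` and the credentials are placeholders, not taken from this page):

```
# Start the remote control API alongside an ordinary command, binding it to the
# documented default address and protecting it with basic authentication.
# "remote:" is a placeholder for a configured remote.
rclone --rc --rc-addr localhost:5572 \
       --rc-user admin --rc-pass secret \
       lsd remote:
```

While the command runs, the API listens on the given address; adding --rc-serve would additionally serve remote objects over HTTP, and supplying --rc-cert/--rc-key would switch the server to TLS.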

### SEE ALSO

* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019
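Several of the global flags documented above are commonly combined. The sketch below is illustrative only: the source path, remote name and backup path are placeholders, and the exact --bwlimit timetable syntax ("HH:MM,RATE" pairs, with "off" lifting the limit) is an assumption to be checked against the bwlimit documentation.

```
# Illustrative sync: files that would be overwritten or deleted are kept in a
# dated backup hierarchy with a .bak suffix, and bandwidth is throttled by time
# of day (timetable syntax assumed, see --bwlimit docs).
rclone sync /path/to/source remote:current \
    --backup-dir remote:archive/2019-02-09 \
    --suffix .bak \
    --bwlimit "08:00,10M 23:00,off" \
    --stats 30s --stats-one-line
```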

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone config dump"
slug: rclone_config_dump
url: /commands/rclone_config_dump/
@@ -25,285 +25,303 @@ rclone config dump [flags]
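This hunk covers the usage section of the `rclone config dump` page. For orientation, a hedged example of invoking it against a non-default config file (the path is a placeholder); the command prints the decoded configuration as JSON:

```
# Illustrative: dump the configuration from an explicit config file as JSON.
rclone config dump --config /path/to/rclone.conf
```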

### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of the Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
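As a quick, hedged illustration of how a few of the global flags above combine on the command line, the sketch below syncs a local directory to a remote while raising the transfer and checker counts, capping bandwidth, and enabling recursive listing. The local path and the `remote:backup` name are placeholders, and the values shown are illustrative rather than recommended defaults.

```
# Illustrative sketch only - the local path and "remote:backup" are placeholders.
# It combines global flags documented above: --transfers, --checkers, --bwlimit,
# --fast-list and --log-level.
rclone sync /home/user/data remote:backup \
  --transfers 8 --checkers 16 --bwlimit 8M --fast-list --log-level INFO
```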
### SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019
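The remote control flags in the listing above (`--rc`, `--rc-addr`, `--rc-user`, `--rc-pass`) are likewise global, so they can be added to any long-running invocation. A minimal sketch, assuming placeholder paths and credentials and the documented default address, might look like this:

```
# Illustrative sketch only - paths and credentials are placeholders.
# Exposes the remote control API on the documented default localhost:5572,
# protected by basic authentication, alongside a normal sync.
rclone sync /home/user/data remote:backup \
  --rc --rc-addr localhost:5572 --rc-user admin --rc-pass 'change-me'
```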
@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone config edit"
slug: rclone_config_edit
url: /commands/rclone_config_edit/
@@ -28,285 +28,303 @@ rclone config edit [flags]
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of the Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
###### Auto generated by spf13/cobra on 24-Nov-2018
|
||||
###### Auto generated by spf13/cobra on 9-Feb-2019
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone config file"
|
||||
slug: rclone_config_file
|
||||
url: /commands/rclone_config_file/
|
||||
@@ -25,285 +25,303 @@ rclone config file [flags]
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
###### Auto generated by spf13/cobra on 24-Nov-2018
|
||||
###### Auto generated by spf13/cobra on 9-Feb-2019
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone config password"
|
||||
slug: rclone_config_password
|
||||
url: /commands/rclone_config_password/
|
||||
@@ -32,285 +32,303 @@ rclone config password <name> [<key> <value>]+ [flags]
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
###### Auto generated by spf13/cobra on 24-Nov-2018
|
||||
###### Auto generated by spf13/cobra on 9-Feb-2019
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone config providers"
|
||||
slug: rclone_config_providers
|
||||
url: /commands/rclone_config_providers/
|
||||
@@ -25,285 +25,303 @@ rclone config providers [flags]
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for Google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
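
As an illustrative sketch, the global flags above can be combined on any rclone command. In the hypothetical invocation below, `myremote:` and the local path are placeholders and the flag values are arbitrary examples.

```
# Hypothetical example: preview a sync of files changed in the last 24 hours,
# using a handful of the global flags documented above.
rclone sync /home/user/docs myremote:backup/docs \
    --dry-run --max-age 24h --fast-list \
    --transfers 8 --bwlimit 4M --progress
```

Dropping `--dry-run` performs the transfer for real; `--fast-list` trades extra memory for fewer listing transactions, as noted above.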

### SEE ALSO

* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone config show"
slug: rclone_config_show
url: /commands/rclone_config_show/
@@ -25,285 +25,303 @@ rclone config show [<remote>] [flags]
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for Google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```

### SEE ALSO

* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone config update"
slug: rclone_config_update
url: /commands/rclone_config_update/
@@ -18,6 +18,11 @@ For example to update the env_auth field of a remote of name myremote you would

rclone config update myremote swift env_auth true

If the remote uses oauth the token will be updated; if you don't
require this, add an extra parameter thus:

rclone config update myremote swift env_auth true config_refresh_token false
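
Multiple key/value pairs can be passed in a single invocation (see the usage line below). A minimal sketch, assuming a swift remote named myremote; the extra auth_version pair and all of the values here are purely illustrative:

```
# Update several swift options at once and skip refreshing the oauth token.
# The remote name and the option values are examples only.
rclone config update myremote swift env_auth true auth_version 3 config_refresh_token false
```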

```
rclone config update <name> [<key> <value>]+ [flags]
@@ -32,285 +37,303 @@ rclone config update <name> [<key> <value>]+ [flags]
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```

### SEE ALSO

* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone copy"
slug: rclone_copy
url: /commands/rclone_copy/
@@ -47,6 +47,17 @@ written a trailing / - meaning "copy the contents of this directory".
This applies to all commands and whether you are talking about the
source or destination.

See the [--no-traverse](/docs/#no-traverse) option for controlling
whether rclone lists the destination directory or not. Supplying this
option when copying a small number of files into a large destination
can speed transfers up greatly.

For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have
changed recently very efficiently like this:

    rclone copy --max-age 24h --no-traverse /path/to/src remote:

**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
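For instance, a quick sketch of how these flags combine: a first pass with the `-n`/`--dry-run` flag listed below shows what such an incremental run would transfer without changing anything, and the real run can then be repeated with `-P` to watch progress (the paths are the same illustrative placeholders as in the example above):

    # preview only - nothing is copied (-n/--dry-run)
    rclone copy -n --max-age 24h --no-traverse /path/to/src remote:
    # real run with live transfer statistics (-P/--progress)
    rclone copy -P --max-age 24h --no-traverse /path/to/src remote: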
@@ -63,285 +74,303 @@ rclone copy source:path dest:path [flags]
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If objects are greater than this, use the drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If objects are greater than this, use the drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
###### Auto generated by spf13/cobra on 24-Nov-2018
|
||||
###### Auto generated by spf13/cobra on 9-Feb-2019
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone copyto"
|
||||
slug: rclone_copyto
|
||||
url: /commands/rclone_copyto/
|
||||
@@ -53,285 +53,303 @@ rclone copyto source:path dest:path [flags]
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
###### Auto generated by spf13/cobra on 24-Nov-2018
|
||||
###### Auto generated by spf13/cobra on 9-Feb-2019
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone copyurl"
|
||||
slug: rclone_copyurl
|
||||
url: /commands/rclone_copyurl/
|
||||
@@ -28,285 +28,303 @@ rclone copyurl https://example.com dest:path [flags]
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
###### Auto generated by spf13/cobra on 24-Nov-2018
|
||||
###### Auto generated by spf13/cobra on 9-Feb-2019
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
date: 2018-11-24T13:43:29Z
|
||||
date: 2019-02-09T10:42:18Z
|
||||
title: "rclone cryptcheck"
|
||||
slug: rclone_cryptcheck
|
||||
url: /commands/rclone_cryptcheck/
|
||||
@@ -53,285 +53,303 @@ rclone cryptcheck remote:path cryptedremote:path [flags]
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set, limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```

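As a quick illustration (not part of the generated reference above), a few of the inherited flags listed there can be combined on a single command line. This is only a sketch; the remote name and paths are placeholders.

```
rclone copy remote:source /tmp/dest --transfers 8 --checkers 16 \
    --bwlimit 10M --log-level INFO --stats 30s
```
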
### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone cryptdecode"
slug: rclone_cryptdecode
url: /commands/rclone_cryptdecode/
@@ -37,285 +37,303 @@ rclone cryptdecode encryptedremote: encryptedfilename [flags]

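For context, and purely as an illustrative sketch that is not part of the diffed page, the synopsis above is typically invoked with a crypt remote and one or more encrypted names; `secret:` and the encrypted filename below are placeholders.

```
rclone cryptdecode secret: 1wxmu83egbrdm9kcu5z5nzauxu
```
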
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set, limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export                      Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix                 Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date                      Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix       If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers                                Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size                                 Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix   Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string                      User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string                    Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth                           Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration                      Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration                              Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
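
The global flags above are passed directly on an rclone command line. The following is a minimal illustrative invocation only; the remote name `remote:` and the specific flag values are assumptions chosen for the example, not recommendations from this documentation:

```
# Hypothetical example: copy a local directory to an assumed configured remote,
# tuning a few of the global flags listed above. All values are illustrative.
rclone copy /path/to/src remote:backup \
    --transfers 8 --checkers 16 \
    --bwlimit 10M --fast-list \
    --log-level INFO --dry-run
```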
### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone dbhashsum"
slug: rclone_dbhashsum
url: /commands/rclone_dbhashsum/
@@ -30,285 +30,303 @@ rclone dbhashsum remote:path [flags]

### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export                     Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix                Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date                     Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix      If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers                               Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size                                Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string                   Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth                          Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration                     Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration                             Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export                      Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix                 Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date                      Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix       If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers                                Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size                                 Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix   Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string                      User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string                    Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth                           Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
      --retries-sleep duration                   Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
      --stats duration                           Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```

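As a brief illustrative sketch (not part of the auto-generated reference above), the inherited flags combine in the usual way on the command line; the remote name and paths below are placeholders, not values taken from this page:

```
# Hypothetical example -- "remote:" and the local path are placeholders.
# Filter by size and age, raise parallelism, cap bandwidth and log to a file,
# using only flags that appear in the list above.
rclone copy /home/user/photos remote:photos \
  --min-size 10k --max-age 30d \
  --transfers 8 --checkers 16 \
  --bwlimit 1M \
  --log-level INFO --log-file rclone.log
```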
### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone dedupe"
slug: rclone_dedupe
url: /commands/rclone_dedupe/
@@ -106,285 +106,303 @@ rclone dedupe [mode] remote:path [flags]
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
      --drive-alternate-export                  Use alternate export URLs for google documents export.
      --drive-auth-owner-only                   Only consider files owned by the authenticated user.
      --drive-chunk-size SizeSuffix             Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
      --drive-use-created-date                  Use file created date instead of modified date.
      --drive-use-trash                         Send files to the trash instead of deleting permanently. (default true)
      --drive-v2-download-min-size SizeSuffix   If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
      --dump-headers                            Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
      --qingstor-endpoint string                 Enter an endpoint URL to connect to the QingStor API.
      --qingstor-env-auth                        Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
      --retries-sleep duration                   Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
      --stats duration                           Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
      --drive-alternate-export                  Use alternate export URLs for google documents export.
      --drive-auth-owner-only                   Only consider files owned by the authenticated user.
      --drive-chunk-size SizeSuffix             Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
      --drive-use-created-date                  Use file created date instead of modified date.
      --drive-use-trash                         Send files to the trash instead of deleting permanently. (default true)
      --drive-v2-download-min-size SizeSuffix   If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
      --dump-headers                            Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
      --jottacloud-upload-resume-limit SizeSuffix   Files bigger than this can be resumed if the upload fails. (default 10M)
      --jottacloud-user string                      User Name
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
      --qingstor-endpoint string                 Enter an endpoint URL to connect to the QingStor API.
      --qingstor-env-auth                        Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
      --retries-sleep duration                   Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
      --stats duration                           Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```

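As a short, hedged sketch (not generated by cobra): the diff above is for the `rclone dedupe` page, whose syntax is `rclone dedupe [mode] remote:path [flags]`. Assuming `newest` is one of the accepted modes, a cautious run could preview the changes first and then apply them; `remote:dupes` is a placeholder:

```
# Hypothetical example -- remote:dupes is a placeholder path.
rclone dedupe newest remote:dupes --dry-run -v
rclone dedupe newest remote:dupes
```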
### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone delete"
slug: rclone_delete
url: /commands/rclone_delete/
@@ -46,285 +46,303 @@ rclone delete remote:path [flags]
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
      --drive-alternate-export                  Use alternate export URLs for google documents export.
      --drive-auth-owner-only                   Only consider files owned by the authenticated user.
      --drive-chunk-size SizeSuffix             Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
      --drive-use-created-date                  Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
      --dump-headers                            Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
      --qingstor-endpoint string                Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
      --drive-alternate-export                  Use alternate export URLs for google documents export.
      --drive-auth-owner-only                   Only consider files owned by the authenticated user.
      --drive-chunk-size SizeSuffix             Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
      --drive-use-created-date                  Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
      --dump-headers                            Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
      --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
      --qingstor-endpoint string                Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```

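The one-line flag descriptions above compress a fair amount of value syntax, for example the `BwTimetable` schedule and the `ms|s|m|h|d|w|M|y` duration suffixes. A hedged illustration (remote name and values are invented, not taken from the diff):

```
# Throttle by time of day and only delete objects older than 30 days
rclone delete --bwlimit "08:00,512 19:00,10M 23:00,off" --min-age 30d remote:old-logs
```
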
### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone deletefile"
slug: rclone_deletefile
url: /commands/rclone_deletefile/
@@ -29,285 +29,303 @@ rclone deletefile remote:path [flags]

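Unlike `delete`, `deletefile` removes a single object rather than recursing into a directory. A minimal hedged example (the path is hypothetical):

```
rclone deletefile remote:backups/2019-01-01.tar.gz
```
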
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
      --drive-alternate-export                  Use alternate export URLs for google documents export.
      --drive-auth-owner-only                   Only consider files owned by the authenticated user.
      --drive-chunk-size SizeSuffix             Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
      --drive-use-created-date                  Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
      --dump-headers                            Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer int Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Obsolete - does nothing.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's. (default 10M)
|
||||
--jottacloud-user string User Name:
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```

### SEE ALSO

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone genautocomplete"
slug: rclone_genautocomplete
url: /commands/rclone_genautocomplete/
@@ -24,281 +24,299 @@ Run with --help to list the supported shells.
### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.,
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.,
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP bodies - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter a endpoint URL to connection QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum instead.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
--jottacloud-user string User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of the key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
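
The flags above are global, so any of them can be combined on a single rclone command line. As a purely illustrative sketch (the remote name `remote:` and the local path are placeholders, not part of the generated reference above):

```
# Sync with 8 parallel transfers and a 1 MByte/s bandwidth cap, keeping any
# file that would be overwritten or deleted under remote:backup with a .bak suffix.
# "remote:" stands for any remote previously set up with "rclone config".
rclone sync /local/data remote:data \
    --transfers 8 \
    --bwlimit 1M \
    --backup-dir remote:backup \
    --suffix .bak \
    --log-level INFO
```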
### SEE ALSO
@@ -307,4 +325,4 @@ Run with --help to list the supported shells.
* [rclone genautocomplete bash](/commands/rclone_genautocomplete_bash/) - Output bash completion script for rclone.
* [rclone genautocomplete zsh](/commands/rclone_genautocomplete_zsh/) - Output zsh completion script for rclone.
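
For example (an illustrative sketch only; the output path is arbitrary), the bash variant linked above can be written to a file and then sourced so the completions take effect in the current shell:

```
# Write the completion script to a file of your choice...
rclone genautocomplete bash ~/rclone_completion.bash
# ...and load it into the running shell.
. ~/rclone_completion.bash
```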
###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019
@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone genautocomplete bash"
slug: rclone_genautocomplete_bash
url: /commands/rclone_genautocomplete_bash/
@@ -40,285 +40,303 @@ rclone genautocomplete bash [output_file] [flags]
### Options inherited from parent commands
```
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
--ignore-errors Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping; use mod-time or checksum instead.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
|
||||
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of the key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
--drive-alternate-export Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
--drive-use-created-date Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
--dump-headers Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
      --ignore-errors                           Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
      --ignore-size                             Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
      --jottacloud-upload-resume-limit SizeSuffix   Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
      --jottacloud-user string                  User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
      --qingstor-endpoint string                Enter an endpoint URL to connect to the QingStor API.
|
||||
      --qingstor-env-auth                       Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
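
The flags above are rclone's global options, inherited by every command documented on this page. As a hedged illustration of how a few of them combine (the local path and the `remote:` name are placeholders invented for this sketch, not values taken from the listing):

```
# Illustrative sketch only: trial-run a sync of files changed in the last week,
# capped at 1 MByte/s, with INFO-level logging. The source path and "remote:"
# are assumed placeholder names, not values from this document.
rclone sync /home/user/docs remote:backup/docs \
    --dry-run --max-age 7d --bwlimit 1M --log-level INFO
```

Each of these flags appears in the inherited-options listings on this page; drop `--dry-run` once the trial output looks right.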

### SEE ALSO

* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output completion script for a given shell.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019

@@ -1,5 +1,5 @@
---
date: 2018-11-24T13:43:29Z
date: 2019-02-09T10:42:18Z
title: "rclone genautocomplete zsh"
slug: rclone_genautocomplete_zsh
url: /commands/rclone_genautocomplete_zsh/
@@ -40,285 +40,303 @@ rclone genautocomplete zsh [output_file] [flags]

### Options inherited from parent commands

```
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size int In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
      --drive-alternate-export                  Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
      --drive-chunk-size SizeSuffix             Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
      --drive-use-created-date                  Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
      --drive-v2-download-min-size SizeSuffix   If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump string List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
      --dump-headers                            Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
      --ignore-errors                           Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
      --ignore-size                             Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-pass string Password.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--jottacloud-user string User Name
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size int Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer int Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size int Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Obsolete - does nothing.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
      --qingstor-endpoint string                Enter an endpoint URL to connect to the QingStor API.
|
||||
      --qingstor-env-auth                       Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 2)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 40)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff int Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.45")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
--acd-auth-url string Auth server URL.
|
||||
--acd-client-id string Amazon Application Client ID.
|
||||
--acd-client-secret string Amazon Application Client Secret.
|
||||
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
|
||||
--acd-token-url string Token server url.
|
||||
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
|
||||
--alias-remote string Remote or path to alias.
|
||||
--ask-password Allow prompt for password for encrypted configuration. (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation.
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
|
||||
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
|
||||
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
|
||||
--azureblob-endpoint string Endpoint for the service
|
||||
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
|
||||
--azureblob-list-chunk int Size of blob list. (default 5000)
|
||||
--azureblob-sas-url string SAS URL for container level access only
|
||||
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
|
||||
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
|
||||
--b2-endpoint string Endpoint for the service.
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
|
||||
--b2-key string Application Key
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
|
||||
--b2-versions Include old versions in directory listings.
|
||||
--backup-dir string Make backups into hierarchy based in DIR.
|
||||
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
|
||||
--box-client-id string Box App Client Id.
|
||||
--box-client-secret string Box App Client Secret
|
||||
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
|
||||
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
|
||||
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
|
||||
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
|
||||
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
|
||||
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
|
||||
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
|
||||
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
|
||||
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
|
||||
--cache-db-purge Clear all the cached data for this remote on start.
|
||||
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
|
||||
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
|
||||
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
|
||||
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
|
||||
--cache-plex-password string The password of the Plex user
|
||||
--cache-plex-url string The URL of the Plex server
|
||||
--cache-plex-username string The username of the Plex user
|
||||
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
|
||||
--cache-remote string Remote to cache.
|
||||
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
|
||||
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
|
||||
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
|
||||
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
|
||||
--cache-writes Cache file data on writes through the FS
|
||||
--checkers int Number of checkers to run in parallel. (default 8)
|
||||
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
|
||||
--config string Config file. (default "/home/ncw/.rclone.conf")
|
||||
--contimeout duration Connect timeout (default 1m0s)
|
||||
-L, --copy-links Follow symlinks and copy the pointed to item.
|
||||
--cpuprofile string Write cpu profile to file
|
||||
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
|
||||
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
|
||||
--crypt-password string Password or pass phrase for encryption.
|
||||
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
|
||||
--crypt-remote string Remote to encrypt/decrypt.
|
||||
--crypt-show-mapping For all files listed show how the names encrypt.
|
||||
--delete-after When synchronizing, delete files on destination after transferring (default)
|
||||
--delete-before When synchronizing, delete files on destination before transferring
|
||||
--delete-during When synchronizing, delete files during transfer
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--disable string Disable a comma separated list of features. Use help to see a list.
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
|
||||
      --drive-alternate-export                  Use alternate export URLs for google documents export.
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user.
|
||||
      --drive-chunk-size SizeSuffix             Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
|
||||
--drive-client-id string Google Application Client Id
|
||||
--drive-client-secret string Google Application Client Secret
|
||||
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
|
||||
--drive-formats string Deprecated: see export_formats
|
||||
--drive-impersonate string Impersonate this user when using a service account.
|
||||
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
|
||||
--drive-keep-revision-forever Keep new head revision of each file forever.
|
||||
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
|
||||
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
|
||||
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
|
||||
--drive-root-folder-id string ID of the root folder
|
||||
--drive-scope string Scope that rclone should use when requesting access from drive.
|
||||
--drive-service-account-credentials string Service Account Credentials JSON blob
|
||||
--drive-service-account-file string Service Account Credentials JSON file path
|
||||
--drive-shared-with-me Only show files that are shared with me.
|
||||
--drive-skip-gdocs Skip google documents in all listings.
|
||||
--drive-team-drive string ID of the Team Drive
|
||||
--drive-trashed-only Only show files that are in the trash.
|
||||
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
|
||||
      --drive-use-created-date                  Use file created date instead of modified date.
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
|
||||
      --drive-v2-download-min-size SizeSuffix   If Objects are greater, use drive v2 API to download. (default off)
|
||||
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
|
||||
--dropbox-client-id string Dropbox App Client Id
|
||||
--dropbox-client-secret string Dropbox App Client Secret
|
||||
--dropbox-impersonate string Impersonate this user when using a business account.
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
|
||||
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
|
||||
      --dump-headers                            Dump HTTP headers - may contain sensitive info
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read exclude patterns from file
|
||||
--exclude-if-present string Exclude directories if filename is present
|
||||
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
|
||||
--files-from stringArray Read list of source-file names from file
|
||||
-f, --filter stringArray Add a file-filtering rule
|
||||
--filter-from stringArray Read filtering patterns from a file
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-pass string FTP password
|
||||
--ftp-port string FTP port, leave blank to use default (21)
|
||||
--ftp-user string FTP username, leave blank for current username, $USER
|
||||
--gcs-bucket-acl string Access Control List for new buckets.
|
||||
--gcs-client-id string Google Application Client Id
|
||||
--gcs-client-secret string Google Application Client Secret
|
||||
--gcs-location string Location for the newly created buckets.
|
||||
--gcs-object-acl string Access Control List for new objects.
|
||||
--gcs-project-number string Project number.
|
||||
--gcs-service-account-file string Service Account Credentials JSON file path
|
||||
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
|
||||
--http-url string URL of http host to connect to
|
||||
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--hubic-client-id string Hubic Client Id
|
||||
--hubic-client-secret string Hubic Client Secret
|
||||
--hubic-no-chunk Don't chunk files during streaming upload.
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--ignore-checksum Skip post copy check of checksums.
|
||||
      --ignore-errors                           Delete even if there are I/O errors
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
      --ignore-size                             Ignore size when skipping; use mod-time or checksum.
|
||||
-I, --ignore-times Don't skip files that match size and time - transfer all files
|
||||
--immutable Do not modify files. Fail if existing files have been modified.
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read include patterns from file
|
||||
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
|
||||
--jottacloud-mountpoint string The mountpoint to use.
|
||||
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
      --jottacloud-upload-resume-limit SizeSuffix   Files bigger than this can be resumed if the upload fails. (default 10M)
|
||||
      --jottacloud-user string                  User Name
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--local-no-check-updated Don't check to see if the files change during upload
|
||||
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
|
||||
--local-nounc string Disable UNC (long path names) conversion on Windows
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
|
||||
--low-level-retries int Number of low level retries to do. (default 10)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-depth int If set limits the recursion depth to this. (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
|
||||
--mega-debug Output more debug from Mega.
|
||||
--mega-hard-delete Delete files permanently rather than putting them into the trash.
|
||||
--mega-pass string Password.
|
||||
--mega-user string User name
|
||||
--memprofile string Write memory profile to file
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
|
||||
--modify-window duration Max time diff to be considered the same (default 1ns)
|
||||
--no-check-certificate Do not verify the server SSL certificate. Insecure.
|
||||
--no-gzip-encoding Don't set Accept-Encoding: gzip.
|
||||
--no-traverse Don't traverse destination file system on copy.
|
||||
--no-update-modtime Don't update destination mod-time if files identical.
|
||||
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
|
||||
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
|
||||
--onedrive-client-id string Microsoft App Client Id
|
||||
--onedrive-client-secret string Microsoft App Client Secret
|
||||
--onedrive-drive-id string The ID of the drive to use
|
||||
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
|
||||
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
|
||||
--opendrive-password string Password.
|
||||
--opendrive-username string Username
|
||||
--pcloud-client-id string Pcloud App Client Id
|
||||
--pcloud-client-secret string Pcloud App Client Secret
|
||||
-P, --progress Show progress during transfer.
|
||||
--qingstor-access-key-id string QingStor Access Key ID
|
||||
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
|
||||
--qingstor-connection-retries int Number of connection retries. (default 3)
|
||||
      --qingstor-endpoint string                Enter an endpoint URL to connect to the QingStor API.
|
||||
      --qingstor-env-auth                       Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
|
||||
--qingstor-secret-access-key string QingStor Secret Access Key (password)
|
||||
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
|
||||
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--qingstor-zone string Zone to connect to.
|
||||
-q, --quiet Print as little stuff as possible
|
||||
--rc Enable the remote control server.
|
||||
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
|
||||
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
|
||||
--rc-client-ca string Client certificate authority to verify clients with
|
||||
--rc-files string Path to local files to serve on the HTTP server.
|
||||
--rc-htpasswd string htpasswd file - if not provided no authentication is done
|
||||
--rc-key string SSL PEM Private key
|
||||
--rc-max-header-bytes int Maximum size of request header (default 4096)
|
||||
--rc-no-auth Don't require auth for certain methods.
|
||||
--rc-pass string Password for authentication.
|
||||
--rc-realm string realm for authentication (default "rclone")
|
||||
--rc-serve Enable the serving of remote objects.
|
||||
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
|
||||
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-user string User name for authentication.
|
||||
--retries int Retry operations this many times if they fail (default 3)
|
||||
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
|
||||
--s3-access-key-id string AWS Access Key ID.
|
||||
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
|
||||
--s3-bucket-acl string Canned ACL used when creating buckets.
|
||||
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
|
||||
--s3-disable-checksum Don't store MD5 checksum with object metadata
|
||||
--s3-endpoint string Endpoint for S3 API.
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
|
||||
--s3-location-constraint string Location constraint - must be set to match the Region.
|
||||
--s3-provider string Choose your S3 provider.
|
||||
--s3-region string Region to connect to.
|
||||
--s3-secret-access-key string AWS Secret Access Key (password)
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
|
||||
--s3-storage-class string The storage class to use when storing new objects in S3.
|
||||
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
|
||||
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
|
||||
--s3-v2-auth If true use v2 authentication.
|
||||
--sftp-ask-password Allow asking for SFTP password when needed.
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
|
||||
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
|
||||
--sftp-key-use-agent When set forces the usage of the ssh-agent.
|
||||
--sftp-pass string SSH password, leave blank to use ssh-agent.
|
||||
--sftp-path-override string Override path used by SSH connection.
|
||||
--sftp-port string SSH port, leave blank to use default (22)
|
||||
--sftp-set-modtime Set the modified time on the remote if set. (default true)
|
||||
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
|
||||
--sftp-user string SSH username, leave blank for current username, ncw
|
||||
--size-only Skip based on size only, not mod-time or checksum
|
||||
--skip-links Don't warn about skipped symlinks.
|
||||
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
|
||||
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
|
||||
--stats-one-line Make the stats fit on one line.
|
||||
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
|
||||
--suffix string Suffix for use with --backup-dir.
|
||||
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
|
||||
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
|
||||
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
|
||||
--swift-auth string Authentication URL for server (OS_AUTH_URL).
|
||||
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
|
||||
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
|
||||
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
|
||||
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
|
||||
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
|
||||
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
|
||||
--swift-key string API key or password (OS_PASSWORD).
|
||||
--swift-no-chunk Don't chunk files during streaming upload.
|
||||
--swift-region string Region name - optional (OS_REGION_NAME)
|
||||
--swift-storage-policy string The storage policy to use when creating a new container
|
||||
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
|
||||
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
|
||||
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
|
||||
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
|
||||
--swift-user string User name to log in (OS_USERNAME).
|
||||
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
|
||||
--syslog Use Syslog for logging
|
||||
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
|
||||
--timeout duration IO idle timeout (default 5m0s)
|
||||
--tpslimit float Limit HTTP transactions per second to this.
|
||||
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
|
||||
--track-renames When synchronizing, track file renames and do a server side move if possible
|
||||
--transfers int Number of file transfers to run in parallel. (default 4)
|
||||
--union-remotes string List of space separated remotes.
|
||||
-u, --update Skip files that are newer on the destination.
|
||||
--use-cookies Enable session cookiejar.
|
||||
--use-mmap Use mmap allocator (see docs).
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
|
||||
--webdav-pass string Password.
|
||||
--webdav-url string URL of http host to connect to
|
||||
--webdav-user string User name
|
||||
--webdav-vendor string Name of the Webdav site/service/software you are using
|
||||
--yandex-client-id string Yandex Client Id
|
||||
--yandex-client-secret string Yandex Client Secret
|
||||
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
|
||||
```
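
The listing above belongs to the `rclone genautocomplete zsh` page, whose usage line appears in the hunk header as `rclone genautocomplete zsh [output_file] [flags]`. A minimal, hedged usage sketch (the `_rclone` filename and the fpath step are assumptions about a typical zsh setup, not details taken from this diff):

```
# Write the zsh completion script to a local file; "_rclone" follows zsh's usual
# naming convention for completion functions (an assumption, not from this page).
rclone genautocomplete zsh _rclone
# Then place the file somewhere on your zsh fpath so compinit can pick it up.
```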

### SEE ALSO

* [rclone genautocomplete](/commands/rclone_genautocomplete/) - Output completion script for a given shell.

###### Auto generated by spf13/cobra on 24-Nov-2018
###### Auto generated by spf13/cobra on 9-Feb-2019