Mirror of https://github.com/rclone/rclone.git

Compare commits


1 commit

747 changed files with 39549 additions and 101855 deletions


@@ -1,5 +1,9 @@
 # golangci-lint configuration options
+run:
+  build-tags:
+    - cmount
+
 linters:
   enable:
     - deadcode


@@ -1,7 +1,7 @@
 ---
 language: go
 sudo: required
-dist: xenial
+dist: trusty
 os:
 - linux
 go_import_path: github.com/ncw/rclone
@@ -49,6 +49,9 @@ matrix:
   allow_failures:
   - go: tip
   include:
+  - go: 1.8.x
+    script:
+    - make quicktest
   - go: 1.9.x
     script:
    - make quicktest


@@ -135,7 +135,7 @@ then change into the project root and run
     make test

-This command is run daily on the integration test server. You can
+This command is run daily on the the integration test server. You can
 find the results at https://pub.rclone.org/integration-tests/

 ## Code Organisation ##

(file diff suppressed because it is too large)

MANUAL.md (692 changes): file diff suppressed because it is too large

MANUAL.txt (3775 changes): file diff suppressed because it is too large


@@ -24,7 +24,6 @@ BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
-LINTTAGS=--build-tags "$(GOTAGS)"
 endif

 .PHONY: rclone vars version
@@ -61,8 +60,11 @@ racequicktest:
 # Do source code quality checks
 check: rclone
+	@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
+	@# see: https://github.com/golangci/golangci-lint/issues/204
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
-	@golangci-lint run $(LINTTAGS) ./...
+	@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
+	@golangci-lint run ./...
 	@echo "-- END CODE QUALITY REPORT ---------------------------------"

 # Get the build dependencies
@@ -98,7 +100,7 @@ commanddocs: rclone
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/

 backenddocs: rclone bin/make_backend_docs.py
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
+	./bin/make_backend_docs.py

 rcdocs: rclone
 	bin/make_rc_docs.sh
@@ -191,7 +193,7 @@ endif
 # Fetch the binary builds from travis and appveyor
 fetch_binaries:
-	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
+	rclone -P sync $(BETA_UPLOAD) build/

 serve: website
 	cd docs && hugo server -v -w


@@ -7,11 +7,11 @@
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
 [Forum](https://forum.rclone.org/) |
+[G+](https://google.com/+RcloneOrg)

 [![Build Status](https://travis-ci.org/ncw/rclone.svg?branch=master)](https://travis-ci.org/ncw/rclone)
 [![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/ncw/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/ncw/rclone)
 [![CircleCI](https://circleci.com/gh/ncw/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/ncw/rclone/tree/master)
-[![Go Report Card](https://goreportcard.com/badge/github.com/ncw/rclone)](https://goreportcard.com/report/github.com/ncw/rclone)
 [![GoDoc](https://godoc.org/github.com/ncw/rclone?status.svg)](https://godoc.org/github.com/ncw/rclone)

 # Rclone
@@ -73,8 +73,6 @@ Please see [the full list of all storage providers and their features](https://r
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional cache ([Cache](https://rclone.org/cache/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
-* Multi-threaded downloads to local disk
-* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna

 ## Installation & documentation


@@ -11,7 +11,7 @@ Making a release
   * edit docs/content/changelog.md
   * make doc
   * git status - to check for new man pages - git add them
-  * git commit -a -v -m "Version v1.XX.0"
+  * git commit -a -v -m "Version v1.XX"
   * make retag
   * git push --tags origin master
   * # Wait for the appveyor and travis builds to complete then...
@@ -27,7 +27,6 @@ Making a release
 Early in the next release cycle update the vendored dependencies
   * Review any pinned packages in go.mod and remove if possible
-  * GO111MODULE=on go get -u github.com/spf13/cobra@master
   * make update
   * git status
   * git add new files


@@ -14,7 +14,7 @@ import (
 func init() {
 	fsi := &fs.RegInfo{
 		Name:        "alias",
-		Description: "Alias for an existing remote",
+		Description: "Alias for a existing remote",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "remote",


@@ -32,6 +32,7 @@ import (
 	"github.com/ncw/rclone/lib/dircache"
 	"github.com/ncw/rclone/lib/oauthutil"
 	"github.com/ncw/rclone/lib/pacer"
+	"github.com/ncw/rclone/lib/rest"
 	"github.com/pkg/errors"
 	"golang.org/x/oauth2"
 )
@@ -1092,7 +1093,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		if !bigObject {
 			in, resp, err = file.OpenHeaders(headers)
 		} else {
-			in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
+			in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
 		}
 		return o.fs.shouldRetry(resp, err)
 	})


@@ -1,6 +1,6 @@
 // Package azureblob provides an interface to the Microsoft Azure blob object storage system

-// +build !plan9,!solaris
+// +build !plan9,!solaris,go1.8

 package azureblob


@@ -1,4 +1,4 @@
-// +build !plan9,!solaris
+// +build !plan9,!solaris,go1.8

 package azureblob


@@ -1,6 +1,6 @@
 // Test AzureBlob filesystem interface

-// +build !plan9,!solaris
+// +build !plan9,!solaris,go1.8

 package azureblob


@@ -1,6 +1,6 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-// +build plan9 solaris
+// +build plan9 solaris !go1.8

 package azureblob


@@ -1213,7 +1213,7 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
 	if err != nil {
 		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
-		return nil
+		return err
 	}
 	o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
 	return nil
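The context line above converts a millisecond Unix timestamp into a `time.Time` by splitting it into whole seconds and the nanosecond remainder. A minimal standalone sketch of the same arithmetic (the timestamp value is just an example):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Unix wants whole seconds plus nanoseconds, so a millisecond
	// timestamp is split as ms/1e3 seconds and (ms%1e3)*1e6 nanoseconds.
	ms := int64(1548115200123) // example millisecond timestamp
	t := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
	fmt.Println(t) // 2019-01-22 00:00:00.123 +0000 UTC
}
```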


@@ -15,9 +15,7 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestCache:",
 		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
 	})
 }


@@ -169,10 +169,23 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		WriteMimeType:           false,
 		BucketBased:             true,
 		CanHaveEmptyDirectories: true,
-		SetTier:                 true,
-		GetTier:                 true,
 	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
+
+	doChangeNotify := wrappedFs.Features().ChangeNotify
+	if doChangeNotify != nil {
+		f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
+			wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
+				decrypted, err := f.DecryptFileName(path)
+				if err != nil {
+					fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
+					return
+				}
+				notifyFunc(decrypted, entryType)
+			}
+			doChangeNotify(wrappedNotifyFunc, pollInterval)
+		}
+	}
+
 	return f, err
 }
@@ -189,7 +202,6 @@ type Options struct {
 // Fs represents a wrapped fs.Fs
 type Fs struct {
 	fs.Fs
-	wrapper fs.Fs
 	name    string
 	root    string
 	opt     Options
@@ -532,16 +544,6 @@ func (f *Fs) UnWrap() fs.Fs {
 	return f.Fs
 }

-// WrapFs returns the Fs that is wrapping this Fs
-func (f *Fs) WrapFs() fs.Fs {
-	return f.wrapper
-}
-
-// SetWrapper sets the Fs that is wrapping this Fs
-func (f *Fs) SetWrapper(wrapper fs.Fs) {
-	f.wrapper = wrapper
-}
-
 // EncryptFileName returns an encrypted file name
 func (f *Fs) EncryptFileName(fileName string) string {
 	return f.cipher.EncryptFileName(fileName)
@@ -614,75 +616,6 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
 	return m.Sums()[hashType], nil
 }

-// MergeDirs merges the contents of all the directories passed
-// in into the first one and rmdirs the other directories.
-func (f *Fs) MergeDirs(dirs []fs.Directory) error {
-	do := f.Fs.Features().MergeDirs
-	if do == nil {
-		return errors.New("MergeDirs not supported")
-	}
-	out := make([]fs.Directory, len(dirs))
-	for i, dir := range dirs {
-		out[i] = fs.NewDirCopy(dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
-	}
-	return do(out)
-}
-
-// DirCacheFlush resets the directory cache - used in testing
-// as an optional interface
-func (f *Fs) DirCacheFlush() {
-	do := f.Fs.Features().DirCacheFlush
-	if do != nil {
-		do()
-	}
-}
-
-// PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(remote string) (string, error) {
-	do := f.Fs.Features().PublicLink
-	if do == nil {
-		return "", errors.New("PublicLink not supported")
-	}
-	o, err := f.NewObject(remote)
-	if err != nil {
-		// assume it is a directory
-		return do(f.cipher.EncryptDirName(remote))
-	}
-	return do(o.(*Object).Object.Remote())
-}
-
-// ChangeNotify calls the passed function with a path
-// that has had changes. If the implementation
-// uses polling, it should adhere to the given interval.
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
-	do := f.Fs.Features().ChangeNotify
-	if do == nil {
-		return
-	}
-	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-		// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
-		var (
-			err       error
-			decrypted string
-		)
-		switch entryType {
-		case fs.EntryDirectory:
-			decrypted, err = f.cipher.DecryptDirName(path)
-		case fs.EntryObject:
-			decrypted, err = f.cipher.DecryptFileName(path)
-		default:
-			fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
-			return
-		}
-		if err != nil {
-			fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
-			return
-		}
-		notifyFunc(decrypted, entryType)
-	}
-	do(wrappedNotifyFunc, pollIntervalChan)
-}
-
 // Object describes a wrapped for being read from the Fs
 //
 // This decrypts the remote name and decrypts the data
@@ -841,34 +774,6 @@ func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
 	return "", nil
 }

-// ID returns the ID of the Object if known, or "" if not
-func (o *Object) ID() string {
-	do, ok := o.Object.(fs.IDer)
-	if !ok {
-		return ""
-	}
-	return do.ID()
-}
-
-// SetTier performs changing storage tier of the Object if
-// multiple storage classes supported
-func (o *Object) SetTier(tier string) error {
-	do, ok := o.Object.(fs.SetTierer)
-	if !ok {
-		return errors.New("crypt: underlying remote does not support SetTier")
-	}
-	return do.SetTier(tier)
-}
-
-// GetTier returns storage tier or class of the Object
-func (o *Object) GetTier() string {
-	do, ok := o.Object.(fs.GetTierer)
-	if !ok {
-		return ""
-	}
-	return do.GetTier()
-}
-
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs              = (*Fs)(nil)
@@ -882,15 +787,7 @@ var (
 	_ fs.UnWrapper       = (*Fs)(nil)
 	_ fs.ListRer         = (*Fs)(nil)
 	_ fs.Abouter         = (*Fs)(nil)
-	_ fs.Wrapper         = (*Fs)(nil)
-	_ fs.MergeDirser     = (*Fs)(nil)
-	_ fs.DirCacheFlusher = (*Fs)(nil)
-	_ fs.ChangeNotifier  = (*Fs)(nil)
-	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.ObjectInfo      = (*ObjectInfo)(nil)
 	_ fs.Object          = (*Object)(nil)
 	_ fs.ObjectUnWrapper = (*Object)(nil)
-	_ fs.IDer            = (*Object)(nil)
-	_ fs.SetTierer       = (*Object)(nil)
-	_ fs.GetTierer       = (*Object)(nil)
 )
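Both sides of this diff wrap the underlying remote's ChangeNotify so that listeners see decrypted names instead of the obfuscated ones the wrapped remote reports. A minimal standalone sketch of that wrapping pattern, using stand-in types and a fake decrypt function rather than the real rclone API:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// EntryType is a stand-in for fs.EntryType.
type EntryType int

// innerChangeNotify simulates the wrapped remote: it reports encrypted names.
func innerChangeNotify(notify func(string, EntryType), poll <-chan time.Duration) {
	notify("3fcd9a.bin", 0)
}

// decrypt is a stand-in for cipher.DecryptFileName.
func decrypt(name string) (string, error) {
	return strings.TrimSuffix(name, ".bin") + ".txt", nil
}

func main() {
	poll := make(chan time.Duration)
	// Wrap the notifier so downstream listeners see decrypted names,
	// skipping entries whose names fail to decrypt instead of failing.
	wrapped := func(notify func(string, EntryType), pollInterval <-chan time.Duration) {
		innerChangeNotify(func(path string, entryType EntryType) {
			plain, err := decrypt(path)
			if err != nil {
				return // drop undecryptable names
			}
			notify(plain, entryType)
		}, pollInterval)
	}
	wrapped(func(path string, _ EntryType) { fmt.Println("changed:", path) }, poll)
}
```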


@@ -21,10 +21,8 @@ func TestIntegration(t *testing.T) {
 		t.Skip("Skipping as -remote not set")
 	}
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: *fstest.RemoteName,
 		NilObject:  (*crypt.Object)(nil),
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
@@ -44,8 +42,6 @@ func TestStandard(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
 			{Name: name, Key: "filename_encryption", Value: "standard"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
@@ -65,8 +61,6 @@ func TestOff(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 			{Name: name, Key: "filename_encryption", Value: "off"},
 		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }
@@ -86,8 +80,6 @@ func TestObfuscate(t *testing.T) {
 			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 			{Name: name, Key: "filename_encryption", Value: "obfuscate"},
 		},
 		SkipBadWindowsCharacters: true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
-		UnimplementableObjectMethods: []string{"MimeType"},
 	})
 }


@@ -1,4 +1,7 @@
 // Package drive interfaces with the Google Drive object storage system
+
+// +build go1.9
+
 package drive

 // FIXME need to deal with some corner cases
@@ -1869,24 +1872,16 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 		remote = remote[:len(remote)-len(ext)]
 	}

-	// Look to see if there is an existing object
-	existingObject, _ := f.NewObject(remote)
-
 	createInfo, err := f.createFileInfo(remote, src.ModTime())
 	if err != nil {
 		return nil, err
 	}

-	supportTeamDrives, err := f.ShouldSupportTeamDrives(src)
-	if err != nil {
-		return nil, err
-	}
-
 	var info *drive.File
 	err = f.pacer.Call(func() (bool, error) {
 		info, err = f.svc.Files.Copy(srcObj.id, createInfo).
 			Fields(partialFields).
-			SupportsTeamDrives(supportTeamDrives).
+			SupportsTeamDrives(f.isTeamDrive).
 			KeepRevisionForever(f.opt.KeepRevisionForever).
 			Do()
 		return shouldRetry(err)
@@ -1894,17 +1889,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, err
 	}
-	newObject, err := f.newObjectWithInfo(remote, info)
-	if err != nil {
-		return nil, err
-	}
-	if existingObject != nil {
-		err = existingObject.Remove()
-		if err != nil {
-			fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
-		}
-	}
-	return newObject, nil
+	return f.newObjectWithInfo(remote, info)
 }

 // Purge deletes all the files and the container
@@ -2030,11 +2015,6 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	dstParents := strings.Join(dstInfo.Parents, ",")
 	dstInfo.Parents = nil

-	supportTeamDrives, err := f.ShouldSupportTeamDrives(src)
-	if err != nil {
-		return nil, err
-	}
-
 	// Do the move
 	var info *drive.File
 	err = f.pacer.Call(func() (bool, error) {
@@ -2042,7 +2022,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 			RemoveParents(srcParentID).
 			AddParents(dstParents).
 			Fields(partialFields).
-			SupportsTeamDrives(supportTeamDrives).
+			SupportsTeamDrives(f.isTeamDrive).
 			Do()
 		return shouldRetry(err)
 	})
@@ -2053,20 +2033,6 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, info)
 }

-// ShouldSupportTeamDrives returns the request should support TeamDrives
-func (f *Fs) ShouldSupportTeamDrives(src fs.Object) (bool, error) {
-	srcIsTeamDrive := false
-	if srcFs, ok := src.Fs().(*Fs); ok {
-		srcIsTeamDrive = srcFs.isTeamDrive
-	}
-	if f.isTeamDrive {
-		return true, nil
-	}
-	return srcIsTeamDrive, nil
-}
-
 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
 func (f *Fs) PublicLink(remote string) (link string, err error) {
 	id, err := f.dirCache.FindDir(remote, false)


@@ -1,3 +1,5 @@
+// +build go1.9
+
 package drive

 import (


@@ -1,5 +1,7 @@
 // Test Drive filesystem interface

+// +build go1.9
+
 package drive

 import (


@@ -0,0 +1,6 @@
+// Build for unsupported platforms to stop go complaining
+// about "no buildable Go source files "
+
+// +build !go1.9
+
+package drive


@@ -8,6 +8,8 @@
 //
 // This contains code adapted from google.golang.org/api (C) the GO AUTHORS

+// +build go1.9
+
 package drive

 import (


@@ -345,36 +345,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "list")
 	}
-
-	var listErr error
-	var files []*ftp.Entry
-
-	resultchan := make(chan []*ftp.Entry, 1)
-	errchan := make(chan error, 1)
-	go func() {
-		result, err := c.List(path.Join(f.root, dir))
-		f.putFtpConnection(&c, err)
-		if err != nil {
-			errchan <- err
-			return
-		}
-		resultchan <- result
-	}()
-
-	// Wait for List for up to Timeout seconds
-	timer := time.NewTimer(fs.Config.Timeout)
-	select {
-	case listErr = <-errchan:
-		timer.Stop()
-		return nil, translateErrorDir(listErr)
-	case files = <-resultchan:
-		timer.Stop()
-	case <-timer.C:
-		// if timer fired assume no error but connection dead
-		fs.Errorf(f, "Timeout when waiting for List")
-		return nil, errors.New("Timeout when waiting for List")
+	files, err := c.List(path.Join(f.root, dir))
+	f.putFtpConnection(&c, err)
+	if err != nil {
+		return nil, translateErrorDir(err)
 	}
 	// Annoyingly FTP returns success for a directory which
 	// doesn't exist, so check it really doesn't exist if no
 	// entries found.
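The removed code above guards a blocking client call with a goroutine and a timer, since the FTP library call itself takes no deadline. A self-contained sketch of that pattern, with callList standing in for the network call:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// callList is a stand-in for a blocking library call with no deadline support.
func callList() ([]string, error) {
	time.Sleep(50 * time.Millisecond) // pretend network round trip
	return []string{"a.txt", "b.txt"}, nil
}

func listWithTimeout(timeout time.Duration) ([]string, error) {
	resultchan := make(chan []string, 1)
	errchan := make(chan error, 1)
	go func() {
		result, err := callList()
		if err != nil {
			errchan <- err
			return
		}
		resultchan <- result
	}()
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case err := <-errchan:
		return nil, err
	case files := <-resultchan:
		return files, nil
	case <-timer.C:
		// if the timer fires, assume the connection is dead
		return nil, errors.New("timeout waiting for List")
	}
}

func main() {
	files, err := listWithTimeout(time.Second)
	fmt.Println(files, err)
}
```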


@@ -1,4 +1,7 @@
 // Package googlecloudstorage provides an interface to Google Cloud Storage
+
+// +build go1.9
+
 package googlecloudstorage

 /*


@@ -1,5 +1,7 @@
 // Test GoogleCloudStorage filesystem interface

+// +build go1.9
+
 package googlecloudstorage_test

 import (


@@ -0,0 +1,6 @@
+// Build for unsupported platforms to stop go complaining
+// about "no buildable Go source files "
+
+// +build !go1.9
+
+package googlecloudstorage


@@ -1,3 +1,5 @@
+// +build go1.8
+
 package http

 import (


@@ -11,9 +11,7 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestHubic:",
 		NilObject:  (*hubic.Object)(nil),
-		SkipFsCheckWrap:     true,
-		SkipObjectCheckWrap: true,
 	})
 }


@@ -314,9 +314,3 @@ type UploadResponse struct {
 	Deleted interface{} `json:"deleted"`
 	Mime    string      `json:"mime"`
 }
-
-// DeviceRegistrationResponse is the response to registering a device
-type DeviceRegistrationResponse struct {
-	ClientID     string `json:"client_id"`
-	ClientSecret string `json:"client_secret"`
-}


@@ -8,7 +8,6 @@ import (
 	"io"
 	"io/ioutil"
 	"log"
-	"math/rand"
 	"net/http"
 	"net/url"
 	"os"
@@ -41,21 +40,15 @@ const (
 	maxSleep          = 2 * time.Second
 	decayConstant     = 2 // bigger for slower decay, exponential
 	defaultDevice     = "Jotta"
-	defaultMountpoint = "Archive"
+	defaultMountpoint = "Sync" // nolint
 	rootURL           = "https://www.jottacloud.com/jfs/"
 	apiURL            = "https://api.jottacloud.com/files/v1/"
 	baseURL           = "https://www.jottacloud.com/"
 	tokenURL          = "https://api.jottacloud.com/auth/v1/token"
-	registerURL       = "https://api.jottacloud.com/auth/v1/register"
 	cachePrefix       = "rclone-jcmd5-"
 	rcloneClientID              = "nibfk8biu12ju7hpqomr8b1e40"
 	rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
 	configUsername              = "user"
-	configClientID              = "client_id"
-	configClientSecret          = "client_secret"
-	configDevice                = "device"
-	configMountpoint            = "mountpoint"
-	charset                     = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
 )

 var (
@@ -65,13 +58,14 @@ var (
 			AuthURL:  tokenURL,
 			TokenURL: tokenURL,
 		},
-		RedirectURL: oauthutil.RedirectLocalhostURL,
+		ClientID:     rcloneClientID,
+		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
+		RedirectURL:  oauthutil.RedirectLocalhostURL,
 	}
 )

 // Register with Fs
 func init() {
-	// needs to be done early so we can use oauth during config
 	fs.Register(&fs.RegInfo{
 		Name:        "jottacloud",
 		Description: "JottaCloud",
@@ -85,62 +79,14 @@ func init() {
 			}
 		}

-		srv := rest.NewClient(fshttp.NewClient(fs.Config))
-
-		// ask if we should create a device specifc token: https://github.com/ncw/rclone/issues/2995
-		fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
-		if config.Confirm() {
-			// random generator to generate random device names
-			seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
-			randonDeviceNamePartLength := 21
-			randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
-			for i := range randomDeviceNamePart {
-				randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
-			}
-			randomDeviceName := "rclone-" + string(randomDeviceNamePart)
-			fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
-
-			values := url.Values{}
-			values.Set("device_id", randomDeviceName)
-
-			// all information comes from https://github.com/ttyridal/aiojotta/wiki/Jotta-protocol-3.-Authentication#token-authentication
-			opts := rest.Opts{
-				Method:       "POST",
-				RootURL:      registerURL,
-				ContentType:  "application/x-www-form-urlencoded",
-				ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
-				Parameters:   values,
-			}
-
-			var deviceRegistration api.DeviceRegistrationResponse
-			_, err := srv.CallJSON(&opts, nil, &deviceRegistration)
-			if err != nil {
-				log.Fatalf("Failed to register device: %v", err)
-			}
-
-			m.Set(configClientID, deviceRegistration.ClientID)
-			m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
-			fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
-		}
-
-		clientID, ok := m.Get(configClientID)
-		if !ok {
-			clientID = rcloneClientID
-		}
-		clientSecret, ok := m.Get(configClientSecret)
-		if !ok {
-			clientSecret = rcloneEncryptedClientSecret
-		}
-		oauthConfig.ClientID = clientID
-		oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
-
 		username, ok := m.Get(configUsername)
 		if !ok {
 			log.Fatalf("No username defined")
 		}
-		password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
+		password := config.GetPassword("Your Jottacloud password is only required during config and will not be stored.")

 		// prepare out token request with username and password
+		srv := rest.NewClient(fshttp.NewClient(fs.Config))
 		values := url.Values{}
 		values.Set("grant_type", "PASSWORD")
 		values.Set("password", password)
@@ -160,7 +106,7 @@ func init() {
 		// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
 		if resp != nil {
 			if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
-				fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
+				fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n")
 				fmt.Printf("Enter verification code> ")
 				authCode := config.ReadLine()
 				authCode = strings.Replace(authCode, "-", "", -1) // the sms received contains a pair of 3 digit numbers seperated by '-' but wants a single 6 digit number
@@ -183,49 +129,23 @@ func init() {
 		// finally save them in the config
 		err = oauthutil.PutToken(name, m, &token, true)
 		if err != nil {
-			log.Fatalf("Error while saving token: %s", err)
-		}
-
-		fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
-		if config.Confirm() {
-			oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
-			if err != nil {
-				log.Fatalf("Failed to load oAuthClient: %s", err)
-			}
-			srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
-
-			acc, err := getAccountInfo(srv, username)
-			if err != nil {
-				log.Fatalf("Error getting devices: %s", err)
-			}
-			fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
-			var deviceNames []string
-			for i := range acc.Devices {
-				deviceNames = append(deviceNames, acc.Devices[i].Name)
-			}
-			result := config.Choose("Devices", deviceNames, nil, false)
-			m.Set(configDevice, result)
-
-			dev, err := getDeviceInfo(srv, path.Join(username, result))
-			if err != nil {
-				log.Fatalf("Error getting Mountpoint: %s", err)
-			}
-			if len(dev.MountPoints) == 0 {
-				log.Fatalf("No Mountpoints found for this device.")
-			}
-			fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
-			var mountpointNames []string
-			for i := range dev.MountPoints {
-				mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
-			}
-			result = config.Choose("Mountpoints", mountpointNames, nil, false)
-			m.Set(configMountpoint, result)
-		}
+			log.Fatalf("Error while setting token: %s", err)
+		}
 	},
 	Options: []fs.Option{{
 		Name: configUsername,
 		Help: "User Name:",
+	}, {
+		Name:     "mountpoint",
+		Help:     "The mountpoint to use.",
+		Required: true,
+		Examples: []fs.OptionExample{{
+			Value: "Sync",
+			Help:  "Will be synced by the official client.",
+		}, {
+			Value: "Archive",
+			Help:  "Archive",
+		}},
 	}, {
 		Name: "md5_memory_limit",
 		Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
@@ -253,7 +173,6 @@ func init() {
 // Options defines the configuration for this backend
 type Options struct {
 	User               string        `config:"user"`
-	Device             string        `config:"device"`
 	Mountpoint         string        `config:"mountpoint"`
 	MD5MemoryThreshold fs.SizeSuffix `config:"md5_memory_limit"`
 	HardDelete         bool          `config:"hard_delete"`
@@ -361,31 +280,18 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.JottaFile, err error) {
 	return &result, nil
 }

-// getAccountInfo queries general information about the account.
-// Takes rest.Client and username as parameter to be easily usable
-// during config
-func getAccountInfo(srv *rest.Client, username string) (info *api.AccountInfo, err error) {
+// getAccountInfo retrieves account information
+func (f *Fs) getAccountInfo() (info *api.AccountInfo, err error) {
 	opts := rest.Opts{
 		Method: "GET",
-		Path:   urlPathEscape(username),
+		Path:   urlPathEscape(f.user),
 	}

-	_, err = srv.CallXML(&opts, nil, &info)
-	if err != nil {
-		return nil, err
-	}
-
-	return info, nil
-}
-
-// getDeviceInfo queries Information about a jottacloud device
-func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err error) {
-	opts := rest.Opts{
-		Method: "GET",
-		Path:   urlPathEscape(path),
-	}
-
-	_, err = srv.CallXML(&opts, nil, &info)
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallXML(&opts, nil, &info)
+		return shouldRetry(resp, err)
+	})
 	if err != nil {
 		return nil, err
 	}
@@ -394,18 +300,12 @@ func getDeviceInfo(srv *rest.Client, path string) (info *api.JottaDevice, err er
 }

 // setEndpointUrl reads the account id and generates the API endpoint URL
-func (f *Fs) setEndpointURL() (err error) {
-	info, err := getAccountInfo(f.srv, f.user)
+func (f *Fs) setEndpointURL(mountpoint string) (err error) {
+	info, err := f.getAccountInfo()
 	if err != nil {
 		return errors.Wrap(err, "failed to get endpoint url")
 	}
-	if f.opt.Device == "" {
-		f.opt.Device = defaultDevice
-	}
-	if f.opt.Mountpoint == "" {
-		f.opt.Mountpoint = defaultMountpoint
-	}
-	f.endpointURL = urlPathEscape(path.Join(info.Username, f.opt.Device, f.opt.Mountpoint))
+	f.endpointURL = urlPathEscape(path.Join(info.Username, defaultDevice, mountpoint))
 	return nil
 }
@@ -481,16 +381,8 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	rootIsDir := strings.HasSuffix(root, "/")
 	root = parsePath(root)

-	clientID, ok := m.Get(configClientID)
-	if !ok {
-		clientID = rcloneClientID
-	}
-	clientSecret, ok := m.Get(configClientSecret)
-	if !ok {
-		clientSecret = rcloneEncryptedClientSecret
-	}
-	oauthConfig.ClientID = clientID
-	oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
+	// add jottacloud to the long list of sites that don't follow the oauth spec correctly
+	oauth2.RegisterBrokenAuthHeaderProvider("https://www.jottacloud.com/")

 	// the oauth client for the api servers needs
 	// a filter to fix the grant_type issues (see above)
@@ -530,7 +422,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return err
 	})

-	err = f.setEndpointURL()
+	err = f.setEndpointURL(opt.Mountpoint)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't get account info")
 	}
@@ -788,9 +680,6 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	if f.opt.Device != "Jotta" {
-		return nil, errors.New("upload not supported for devices other than Jotta")
-	}
 	o := f.createObject(src.Remote(), src.ModTime(), src.Size())
 	return o, o.Update(in, src, options...)
 }
@@ -1054,7 +943,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
 // About gets quota information
 func (f *Fs) About() (*fs.Usage, error) {
-	info, err := getAccountInfo(f.srv, f.user)
+	info, err := f.getAccountInfo()
 	if err != nil {
 		return nil, err
 	}


@@ -28,9 +28,8 @@ import (
 )

 // Constants
 const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
 const linkSuffix = ".rclonelink"    // The suffix added to a translated symbolic link
-const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly

 // Register with Fs
 func init() {
@@ -328,14 +327,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	fd, err := os.Open(fsDirPath)
 	if err != nil {
-		isPerm := os.IsPermission(err)
-		err = errors.Wrapf(err, "failed to open directory %q", dir)
-		fs.Errorf(dir, "%v", err)
-		if isPerm {
-			accounting.Stats.Error(fserrors.NoRetryError(err))
-			err = nil // ignore error but fail sync
-		}
-		return nil, err
+		return nil, errors.Wrapf(err, "failed to open directory %q", dir)
 	}
 	defer func() {
 		cerr := fd.Close()
@@ -345,38 +337,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	}()

 	for {
-		var fis []os.FileInfo
-		if useReadDir {
-			// Windows and Plan9 read the directory entries with the stat information in which
-			// shouldn't fail because of unreadable entries.
-			fis, err = fd.Readdir(1024)
-			if err == io.EOF && len(fis) == 0 {
-				break
-			}
-		} else {
-			// For other OSes we read the names only (which shouldn't fail) then stat the
-			// individual ourselves so we can log errors but not fail the directory read.
-			var names []string
-			names, err = fd.Readdirnames(1024)
-			if err == io.EOF && len(names) == 0 {
-				break
-			}
-			if err == nil {
-				for _, name := range names {
-					namepath := filepath.Join(fsDirPath, name)
-					fi, fierr := os.Lstat(namepath)
-					if fierr != nil {
-						err = errors.Wrapf(err, "failed to read directory %q", namepath)
-						fs.Errorf(dir, "%v", fierr)
-						accounting.Stats.Error(fserrors.NoRetryError(fierr)) // fail the sync
-						continue
-					}
-					fis = append(fis, fi)
-				}
-			}
+		fis, err := fd.Readdir(1024)
+		if err == io.EOF && len(fis) == 0 {
+			break
 		}
 		if err != nil {
-			return nil, errors.Wrap(err, "failed to read directory entry")
+			return nil, errors.Wrapf(err, "failed to read directory %q", dir)
 		}

 		for _, fi := range fis {
@@ -743,10 +709,9 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 	o.fs.objectHashesMu.Lock()
 	hashes := o.hashes
-	hashValue, hashFound := o.hashes[r]
 	o.fs.objectHashesMu.Unlock()

-	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
+	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
 		var in io.ReadCloser

 		if !o.translatedLink {
@@ -757,7 +722,7 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 		if err != nil {
 			return "", errors.Wrap(err, "hash: failed to open")
 		}
-		hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
+		hashes, err = hash.Stream(in)
 		closeErr := in.Close()
 		if err != nil {
 			return "", errors.Wrap(err, "hash: failed to read")
@@ -765,16 +730,11 @@ func (o *Object) Hash(r hash.Type) (string, error) {
 		if closeErr != nil {
 			return "", errors.Wrap(closeErr, "hash: failed to close")
 		}
-		hashValue = hashes[r]
 		o.fs.objectHashesMu.Lock()
-		if o.hashes == nil {
-			o.hashes = hashes
-		} else {
-			o.hashes[r] = hashValue
-		}
+		o.hashes = hashes
 		o.fs.objectHashesMu.Unlock()
 	}
-	return hashValue, nil
+	return hashes[r], nil
 }

 // Size returns the size of an object in bytes
@@ -1038,36 +998,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	return o.lstat()
 }

-// OpenWriterAt opens with a handle for random access writes
-//
-// Pass in the remote desired and the size if known.
-//
-// It truncates any existing object
-func (f *Fs) OpenWriterAt(remote string, size int64) (fs.WriterAtCloser, error) {
-	// Temporary Object under construction
-	o := f.newObject(remote, "")
-
-	err := o.mkdirAll()
-	if err != nil {
-		return nil, err
-	}
-
-	if o.translatedLink {
-		return nil, errors.New("can't open a symlink for random writing")
-	}
-
-	out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
-	if err != nil {
-		return nil, err
-	}
-
-	// Pre-allocate the file for performance reasons
-	err = preAllocate(size, out)
-	if err != nil {
-		fs.Debugf(o, "Failed to pre-allocate: %v", err)
-	}
-
-	return out, nil
-}
-
 // setMetadata sets the file info from the os.FileInfo passed in
 func (o *Object) setMetadata(info os.FileInfo) {
 	// Don't overwrite the info if we don't need to
@@ -1209,11 +1139,10 @@ func cleanWindowsName(f *Fs, name string) string {
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs          = &Fs{}
 	_ fs.Purger      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.Mover       = &Fs{}
 	_ fs.DirMover    = &Fs{}
-	_ fs.OpenWriterAter = &Fs{}
 	_ fs.Object      = &Object{}
 )


@@ -402,35 +402,6 @@ func (f *Fs) clearRoot() {
 	//log.Printf("cleared root directory")
 }

-// CleanUp deletes all files currently in trash
-func (f *Fs) CleanUp() (err error) {
-	trash := f.srv.FS.GetTrash()
-	items := []*mega.Node{}
-	_, err = f.list(trash, func(item *mega.Node) bool {
-		items = append(items, item)
-		return false
-	})
-	if err != nil {
-		return errors.Wrap(err, "CleanUp failed to list items in trash")
-	}
-	fs.Infof(f, "Deleting %d items from the trash", len(items))
-	errors := 0
-	// similar to f.deleteNode(trash) but with HardDelete as true
-	for _, item := range items {
-		fs.Debugf(f, "Deleting trash %q", item.GetName())
-		deleteErr := f.pacer.Call(func() (bool, error) {
-			err := f.srv.Delete(item, true)
-			return shouldRetry(err)
-		})
-		if deleteErr != nil {
-			err = deleteErr
-			errors++
-		}
-	}
-	fs.Infof(f, "Deleted %d items from the trash with %d errors", len(items), errors)
-	return err
-}
-
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.


@@ -14,8 +14,6 @@ import (
 	"strings"
 	"time"

-	"github.com/ncw/rclone/lib/atexit"
-
 	"github.com/ncw/rclone/backend/onedrive/api"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
@@ -1493,40 +1491,23 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
 		return nil, errors.New("unknown-sized upload not supported")
 	}

-	uploadURLChan := make(chan string, 1)
-	gracefulCancel := func() {
-		uploadURL, ok := <-uploadURLChan
-		// Reading from uploadURLChan blocks the atexit process until
-		// we are able to use uploadURL to cancel the upload
-		if !ok { // createUploadSession failed - no need to cancel upload
-			return
-		}
-		fs.Debugf(o, "Cancelling multipart upload")
-		cancelErr := o.cancelUploadSession(uploadURL)
-		if cancelErr != nil {
-			fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
-		}
-	}
-	cancelFuncHandle := atexit.Register(gracefulCancel)
-
 	// Create upload session
 	fs.Debugf(o, "Starting multipart upload")
 	session, err := o.createUploadSession(modTime)
 	if err != nil {
-		close(uploadURLChan)
-		atexit.Unregister(cancelFuncHandle)
 		return nil, err
 	}
 	uploadURL := session.UploadURL
-	uploadURLChan <- uploadURL

+	// Cancel the session if something went wrong
 	defer func() {
 		if err != nil {
-			fs.Debugf(o, "Error encountered during upload: %v", err)
-			gracefulCancel()
+			fs.Debugf(o, "Cancelling multipart upload: %v", err)
+			cancelErr := o.cancelUploadSession(uploadURL)
+			if cancelErr != nil {
+				fs.Logf(o, "Failed to cancel multipart upload: %v", err)
+			}
 		}
-		atexit.Unregister(cancelFuncHandle)
 	}()

 	// Upload the chunks


@@ -369,10 +369,6 @@ func init() {
 			Value:    "s3.us-west-1.wasabisys.com",
 			Help:     "Wasabi US West endpoint",
 			Provider: "Wasabi",
-		}, {
-			Value:    "s3.eu-central-1.wasabisys.com",
-			Help:     "Wasabi EU Central endpoint",
-			Provider: "Wasabi",
 		}},
 	}, {
 		Name: "location_constraint",
@@ -649,9 +645,6 @@ isn't set then "acl" is used instead.`,
 		}, {
 			Value: "GLACIER",
 			Help:  "Glacier storage class",
-		}, {
-			Value: "DEEP_ARCHIVE",
-			Help:  "Glacier Deep Archive storage class",
 		}},
 	}, {
 		// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
@@ -736,14 +729,6 @@ If it is set then rclone will use v2 authentication.
 Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
 		Default:  false,
 		Advanced: true,
-	}, {
-		Name:     "use_accelerate_endpoint",
-		Provider: "AWS",
-		Help: `If true use the AWS S3 accelerated endpoint.
-
-See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
-		Default:  false,
-		Advanced: true,
 	}},
 })
}
@@ -764,26 +749,25 @@ const (
 // Options defines the configuration for this backend
 type Options struct {
 	Provider             string        `config:"provider"`
 	EnvAuth              bool          `config:"env_auth"`
 	AccessKeyID          string        `config:"access_key_id"`
 	SecretAccessKey      string        `config:"secret_access_key"`
 	Region               string        `config:"region"`
 	Endpoint             string        `config:"endpoint"`
 	LocationConstraint   string        `config:"location_constraint"`
 	ACL                  string        `config:"acl"`
 	BucketACL            string        `config:"bucket_acl"`
 	ServerSideEncryption string        `config:"server_side_encryption"`
 	SSEKMSKeyID          string        `config:"sse_kms_key_id"`
 	StorageClass         string        `config:"storage_class"`
 	UploadCutoff         fs.SizeSuffix `config:"upload_cutoff"`
 	ChunkSize            fs.SizeSuffix `config:"chunk_size"`
 	DisableChecksum      bool          `config:"disable_checksum"`
 	SessionToken         string        `config:"session_token"`
 	UploadConcurrency    int           `config:"upload_concurrency"`
 	ForcePathStyle       bool          `config:"force_path_style"`
 	V2Auth               bool          `config:"v2_auth"`
-	UseAccelerateEndpoint bool         `config:"use_accelerate_endpoint"`
 }

 // Fs represents a remote s3 server
@@ -957,15 +941,14 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 	if opt.Region == "" {
 		opt.Region = "us-east-1"
 	}
-	if opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
+	if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
 		opt.ForcePathStyle = false
 	}
 	awsConfig := aws.NewConfig().
 		WithMaxRetries(maxRetries).
 		WithCredentials(cred).
 		WithHTTPClient(fshttp.NewClient(fs.Config)).
-		WithS3ForcePathStyle(opt.ForcePathStyle).
-		WithS3UseAccelerate(opt.UseAccelerateEndpoint)
+		WithS3ForcePathStyle(opt.ForcePathStyle)
 	if opt.Region != "" {
 		awsConfig.WithRegion(opt.Region)
 	}


@@ -1,6 +1,6 @@
// Package sftp provides a filesystem interface using github.com/pkg/sftp // Package sftp provides a filesystem interface using github.com/pkg/sftp
// +build !plan9 // +build !plan9,go1.9
package sftp package sftp
@@ -14,7 +14,6 @@ import (
"os/user" "os/user"
"path" "path"
"regexp" "regexp"
"strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
@@ -26,7 +25,6 @@ import (
"github.com/ncw/rclone/fs/config/obscure" "github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp" "github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash" "github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/env"
"github.com/ncw/rclone/lib/readers" "github.com/ncw/rclone/lib/readers"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pkg/sftp" "github.com/pkg/sftp"
@@ -188,10 +186,10 @@ func readCurrentUser() (userName string) {
return os.Getenv("LOGNAME") return os.Getenv("LOGNAME")
} }
// dial starts a client connection to the given SSH server. It is a // Dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address, // convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client. // initiates the SSH handshake, and then sets up a Client.
func (f *Fs) dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) { func Dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
dialer := fshttp.NewDialer(fs.Config) dialer := fshttp.NewDialer(fs.Config)
conn, err := dialer.Dial(network, addr) conn, err := dialer.Dial(network, addr)
if err != nil { if err != nil {
@@ -201,7 +199,6 @@ func (f *Fs) dial(network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Clien
if err != nil { if err != nil {
return nil, err return nil, err
} }
fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
return ssh.NewClient(c, chans, reqs), nil return ssh.NewClient(c, chans, reqs), nil
} }
@@ -247,7 +244,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
c = &conn{ c = &conn{
err: make(chan error, 1), err: make(chan error, 1),
} }
c.sshClient, err = f.dial("tcp", f.opt.Host+":"+f.opt.Port, f.config) c.sshClient, err = Dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "couldn't connect SSH") return nil, errors.Wrap(err, "couldn't connect SSH")
} }
@@ -318,6 +315,18 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
f.poolMu.Unlock() f.poolMu.Unlock()
} }
// shellExpand replaces a leading "~" with "${HOME}" and expands all environment
// variables afterwards.
func shellExpand(s string) string {
if s != "" {
if s[0] == '~' {
s = "${HOME}" + s[1:]
}
s = os.ExpandEnv(s)
}
return s
}
// NewFs creates a new Fs object from the name and root. It connects to // NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file. // the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -338,7 +347,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
Auth: []ssh.AuthMethod{}, Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(), HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: fs.Config.ConnectTimeout, Timeout: fs.Config.ConnectTimeout,
ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
} }
if opt.UseInsecureCipher { if opt.UseInsecureCipher {
@@ -346,7 +354,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc") sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
} }
keyFile := env.ShellExpand(opt.KeyFile) keyFile := shellExpand(opt.KeyFile)
// Add ssh agent-auth if no password or file specified // Add ssh agent-auth if no password or file specified
if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent { if (opt.Pass == "" && keyFile == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New() sshAgentClient, _, err := sshagent.New()
@@ -805,49 +813,6 @@ func (f *Fs) Hashes() hash.Set {
return set return set
} }
// About gets usage stats
func (f *Fs) About() (*fs.Usage, error) {
c, err := f.getSftpConnection()
if err != nil {
return nil, errors.Wrap(err, "About get SFTP connection")
}
session, err := c.sshClient.NewSession()
f.putSftpConnection(&c, err)
if err != nil {
return nil, errors.Wrap(err, "About put SFTP connection")
}
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(f.root)
if f.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(f.opt.PathOverride, f.root))
}
if len(escapedPath) == 0 {
escapedPath = "/"
}
err = session.Run("df -k " + escapedPath)
if err != nil {
_ = session.Close()
return nil, errors.Wrap(err, "About invocation of df failed. Your remote may not support about.")
}
_ = session.Close()
usageTotal, usageUsed, usageAvail := parseUsage(stdout.Bytes())
usage := &fs.Usage{}
if usageTotal >= 0 {
usage.Total = fs.NewUsageValue(usageTotal)
}
if usageUsed >= 0 {
usage.Used = fs.NewUsageValue(usageUsed)
}
if usageAvail >= 0 {
usage.Free = fs.NewUsageValue(usageAvail)
}
return usage, nil
}
// Fs is the filesystem this remote sftp file object is located within // Fs is the filesystem this remote sftp file object is located within
func (o *Object) Fs() fs.Info { func (o *Object) Fs() fs.Info {
return o.fs return o.fs
@@ -938,35 +903,6 @@ func parseHash(bytes []byte) string {
return strings.Split(string(bytes), " ")[0] // Split at hash / filename separator return strings.Split(string(bytes), " ")[0] // Split at hash / filename separator
} }
// Parses the byte array output from the SSH session
// returned by an invocation of df into
// the disk size, used space, and avaliable space on the disk, in that order.
// Only works when `df` has output info on only one disk
func parseUsage(bytes []byte) (spaceTotal int64, spaceUsed int64, spaceAvail int64) {
spaceTotal, spaceUsed, spaceAvail = -1, -1, -1
lines := strings.Split(string(bytes), "\n")
if len(lines) < 2 {
return
}
split := strings.Fields(lines[1])
if len(split) < 6 {
return
}
spaceTotal, err := strconv.ParseInt(split[1], 10, 64)
if err != nil {
spaceTotal = -1
}
spaceUsed, err = strconv.ParseInt(split[2], 10, 64)
if err != nil {
spaceUsed = -1
}
spaceAvail, err = strconv.ParseInt(split[3], 10, 64)
if err != nil {
spaceAvail = -1
}
return spaceTotal * 1024, spaceUsed * 1024, spaceAvail * 1024
}
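
A quick worked example of the 1K-block arithmetic (the figures match the first TestParseUsage case further down): a df line reporting 91283092 total, 81111888 used and 10154820 available 1K-blocks comes back as 91283092 × 1024 = 93473886208, 81111888 × 1024 = 83058573312 and 10154820 × 1024 = 10398535680 bytes.
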
// Size returns the size in bytes of the remote sftp file // Size returns the size in bytes of the remote sftp file
func (o *Object) Size() int64 { func (o *Object) Size() int64 {
return o.size return o.size

View File

@@ -1,4 +1,4 @@
// +build !plan9 // +build !plan9,go1.9
package sftp package sftp
@@ -35,17 +35,3 @@ func TestParseHash(t *testing.T) {
assert.Equal(t, test.checksum, got, fmt.Sprintf("Test %d sshOutput = %q", i, test.sshOutput)) assert.Equal(t, test.checksum, got, fmt.Sprintf("Test %d sshOutput = %q", i, test.sshOutput))
} }
} }
func TestParseUsage(t *testing.T) {
for i, test := range []struct {
sshOutput string
usage [3]int64
}{
{"Filesystem 1K-blocks Used Available Use% Mounted on\n/dev/root 91283092 81111888 10154820 89% /", [3]int64{93473886208, 83058573312, 10398535680}},
{"Filesystem 1K-blocks Used Available Use% Mounted on\ntmpfs 818256 1636 816620 1% /run", [3]int64{837894144, 1675264, 836218880}},
{"Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted on\n/dev/disk0s2 244277768 94454848 149566920 39% 997820 4293969459 0% /", [3]int64{250140434432, 96721764352, 153156526080}},
} {
gotSpaceTotal, gotSpaceUsed, gotSpaceAvail := parseUsage([]byte(test.sshOutput))
assert.Equal(t, test.usage, [3]int64{gotSpaceTotal, gotSpaceUsed, gotSpaceAvail}, fmt.Sprintf("Test %d sshOutput = %q", i, test.sshOutput))
}
}

View File

@@ -1,6 +1,6 @@
// Test Sftp filesystem interface // Test Sftp filesystem interface
// +build !plan9 // +build !plan9,go1.9
package sftp_test package sftp_test

View File

@@ -1,6 +1,6 @@
// Build for sftp for unsupported platforms to stop go complaining // Build for sftp for unsupported platforms to stop go complaining
// about "no buildable Go source files " // about "no buildable Go source files "
// +build plan9 // +build plan9 !go1.9
package sftp package sftp

View File

@@ -1,4 +1,4 @@
// +build !plan9 // +build !plan9,go1.9
package sftp package sftp

View File

@@ -1,4 +1,4 @@
// +build !plan9 // +build !plan9,go1.9
package sftp package sftp

View File

@@ -286,31 +286,6 @@ func shouldRetry(err error) (bool, error) {
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried. It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
if value := headers["Retry-After"]; value != "" {
retryAfter, parseErr := strconv.Atoi(value)
if parseErr != nil {
fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
} else {
duration := time.Second * time.Duration(retryAfter)
if duration <= 60*time.Second {
// Do a short sleep immediately
fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
time.Sleep(duration)
return true, err
}
// Delay a long sleep for a retry
return false, fserrors.NewErrorRetryAfter(duration)
}
}
}
return shouldRetry(err)
}
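
As an aside, HTTP also permits Retry-After to carry an HTTP-date rather than delta-seconds; the code above handles only the integer form. A self-contained sketch (not part of this change; parseRetryAfter is a hypothetical helper) that would accept both forms:

    package main

    import (
        "fmt"
        "net/http"
        "strconv"
        "time"
    )

    // parseRetryAfter decodes a Retry-After header value, accepting both
    // the delta-seconds form and the HTTP-date form.
    func parseRetryAfter(value string, now time.Time) (time.Duration, bool) {
        if secs, err := strconv.Atoi(value); err == nil {
            return time.Duration(secs) * time.Second, true
        }
        if t, err := http.ParseTime(value); err == nil {
            return t.Sub(now), true
        }
        return 0, false
    }

    func main() {
        now := time.Now()
        fmt.Println(parseRetryAfter("1", now))                                              // 1s true
        fmt.Println(parseRetryAfter(now.Add(time.Hour).UTC().Format(http.TimeFormat), now)) // ~1h true
    }
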
// Pattern to match a swift path // Pattern to match a swift path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`) var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
@@ -438,9 +413,8 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
// Check to see if the object exists - ignoring directory markers // Check to see if the object exists - ignoring directory markers
var info swift.Object var info swift.Object
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers info, _, err = f.c.Object(container, directory)
info, rxHeaders, err = f.c.Object(container, directory) return shouldRetry(err)
return shouldRetryHeaders(rxHeaders, err)
}) })
if err == nil && info.ContentType != directoryMarkerContentType { if err == nil && info.ContentType != directoryMarkerContentType {
f.root = path.Dir(directory) f.root = path.Dir(directory)
@@ -749,9 +723,8 @@ func (f *Fs) Mkdir(dir string) error {
var err error = swift.ContainerNotFound var err error = swift.ContainerNotFound
if !f.noCheckContainer { if !f.noCheckContainer {
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers _, _, err = f.c.Container(f.container)
_, rxHeaders, err = f.c.Container(f.container) return shouldRetry(err)
return shouldRetryHeaders(rxHeaders, err)
}) })
} }
if err == swift.ContainerNotFound { if err == swift.ContainerNotFound {
@@ -843,9 +816,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
} }
srcFs := srcObj.fs srcFs := srcObj.fs
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers _, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
rxHeaders, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil) return shouldRetry(err)
return shouldRetryHeaders(rxHeaders, err)
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@@ -955,7 +927,7 @@ func (o *Object) readMetaData() (err error) {
var h swift.Headers var h swift.Headers
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote) info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
return shouldRetryHeaders(h, err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
if err == swift.ObjectNotFound { if err == swift.ObjectNotFound {
@@ -1030,9 +1002,8 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
headers := fs.OpenOptionHeaders(options) headers := fs.OpenOptionHeaders(options)
_, isRanging := headers["Range"] _, isRanging := headers["Range"]
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
in, rxHeaders, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers) return shouldRetry(err)
return shouldRetryHeaders(rxHeaders, err)
}) })
return return
} }
@@ -1103,9 +1074,8 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
// Create the segmentsContainer if it doesn't exist // Create the segmentsContainer if it doesn't exist
var err error var err error
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers _, _, err = o.fs.c.Container(o.fs.segmentsContainer)
_, rxHeaders, err = o.fs.c.Container(o.fs.segmentsContainer) return shouldRetry(err)
return shouldRetryHeaders(rxHeaders, err)
}) })
if err == swift.ContainerNotFound { if err == swift.ContainerNotFound {
headers := swift.Headers{} headers := swift.Headers{}
@@ -1145,9 +1115,8 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i) segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer) fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
var rxHeaders swift.Headers _, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
rxHeaders, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers) return shouldRetry(err)
return shouldRetryHeaders(rxHeaders, err)
}) })
if err != nil { if err != nil {
return "", err return "", err
@@ -1160,9 +1129,8 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
emptyReader := bytes.NewReader(nil) emptyReader := bytes.NewReader(nil)
manifestName := o.fs.root + o.remote manifestName := o.fs.root + o.remote
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers _, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers) return shouldRetry(err)
return shouldRetryHeaders(rxHeaders, err)
}) })
return uniquePrefix + "/", err return uniquePrefix + "/", err
} }
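
For background on what updateChunks builds: a Swift dynamic large object is a set of segment objects plus a zero-byte manifest whose X-Object-Manifest header names the segment prefix, so a GET of the manifest streams the segments back in order. A rough sketch of the final step, assuming that conventional header layout (names as in the code above):

    // write the zero-byte manifest that stitches the segments together
    headers["X-Object-Manifest"] = o.fs.segmentsContainer + "/" + segmentsPath
    emptyReader := bytes.NewReader(nil)
    rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
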
@@ -1206,7 +1174,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
var rxHeaders swift.Headers var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers) rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
return shouldRetryHeaders(rxHeaders, err) return shouldRetry(err)
}) })
if err != nil { if err != nil {
return err return err

View File

@@ -1,13 +1,6 @@
package swift package swift
import ( import "testing"
"testing"
"time"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/swift"
"github.com/stretchr/testify/assert"
)
func TestInternalUrlEncode(t *testing.T) { func TestInternalUrlEncode(t *testing.T) {
for _, test := range []struct { for _, test := range []struct {
@@ -30,37 +23,3 @@ func TestInternalUrlEncode(t *testing.T) {
} }
} }
} }
func TestInternalShouldRetryHeaders(t *testing.T) {
headers := swift.Headers{
"Content-Length": "64",
"Content-Type": "text/html; charset=UTF-8",
"Date": "Mon: 18 Mar 2019 12:11:23 GMT",
"Retry-After": "1",
}
err := &swift.Error{
StatusCode: 429,
Text: "Too Many Requests",
}
// Short sleep should just do the sleep
start := time.Now()
retry, gotErr := shouldRetryHeaders(headers, err)
dt := time.Since(start)
assert.True(t, retry)
assert.Equal(t, err, gotErr)
assert.True(t, dt > time.Second/2)
// Long sleep should return RetryError
headers["Retry-After"] = "3600"
start = time.Now()
retry, gotErr = shouldRetryHeaders(headers, err)
dt = time.Since(start)
assert.True(t, dt < time.Second)
assert.False(t, retry)
assert.Equal(t, true, fserrors.IsRetryAfterError(gotErr))
after := gotErr.(fserrors.RetryAfter).RetryAfter()
dt = after.Sub(start)
assert.True(t, dt >= time.Hour-time.Second && dt <= time.Hour+time.Second)
}

View File

@@ -95,9 +95,6 @@ Use the --json flag for a computer readable output, eg
if err != nil { if err != nil {
return errors.Wrap(err, "About call failed") return errors.Wrap(err, "About call failed")
} }
if u == nil {
return errors.New("nil usage returned")
}
if jsonOutput { if jsonOutput {
out := json.NewEncoder(os.Stdout) out := json.NewEncoder(os.Stdout)
out.SetIndent("", "\t") out.SetIndent("", "\t")

View File

@@ -230,34 +230,22 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
SigInfoHandler() SigInfoHandler()
for try := 1; try <= *retries; try++ { for try := 1; try <= *retries; try++ {
err = f() err = f()
fs.CountError(err) if !Retry || (err == nil && !accounting.Stats.Errored()) {
lastErr := accounting.Stats.GetLastError()
if err == nil {
err = lastErr
}
if !Retry || !accounting.Stats.Errored() {
if try > 1 { if try > 1 {
fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries) fs.Errorf(nil, "Attempt %d/%d succeeded", try, *retries)
} }
break break
} }
if accounting.Stats.HadFatalError() { if fserrors.IsFatalError(err) || accounting.Stats.HadFatalError() {
fs.Errorf(nil, "Fatal error received - not attempting retries") fs.Errorf(nil, "Fatal error received - not attempting retries")
break break
} }
if accounting.Stats.Errored() && !accounting.Stats.HadRetryError() { if fserrors.IsNoRetryError(err) || (accounting.Stats.Errored() && !accounting.Stats.HadRetryError()) {
fs.Errorf(nil, "Can't retry this error - not attempting retries") fs.Errorf(nil, "Can't retry this error - not attempting retries")
break break
} }
if retryAfter := accounting.Stats.RetryAfter(); !retryAfter.IsZero() { if err != nil {
d := retryAfter.Sub(time.Now()) fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, accounting.Stats.GetErrors(), err)
if d > 0 {
fs.Logf(nil, "Received retry after error - sleeping until %s (%v)", retryAfter.Format(time.RFC3339Nano), d)
time.Sleep(d)
}
}
if lastErr != nil {
fs.Errorf(nil, "Attempt %d/%d failed with %d errors and: %v", try, *retries, accounting.Stats.GetErrors(), lastErr)
} else { } else {
fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, accounting.Stats.GetErrors()) fs.Errorf(nil, "Attempt %d/%d failed with %d errors", try, *retries, accounting.Stats.GetErrors())
} }
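
Stripped of the stats bookkeeping, the fuller variant of this retry loop has the following shape (a sketch only; isFatal, isRetryable and retryAfterTime are stand-ins for the accounting.Stats queries above):

    for try := 1; try <= retries; try++ {
        err = f()
        if err == nil {
            break // success, no retry needed
        }
        if isFatal(err) || !isRetryable(err) {
            break // retrying won't help
        }
        if t := retryAfterTime(); !t.IsZero() {
            time.Sleep(time.Until(t)) // honour a server supplied Retry-After
        }
    }
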
@@ -270,12 +258,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
} }
stopStats() stopStats()
if err != nil { if err != nil {
nerrs := accounting.Stats.GetErrors() log.Printf("Failed to %s: %v", cmd.Name(), err)
if nerrs <= 1 {
log.Printf("Failed to %s: %v", cmd.Name(), err)
} else {
log.Printf("Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, err)
}
resolveExitCode(err) resolveExitCode(err)
} }
if showStats && (accounting.Stats.Errored() || *statsInterval > 0) { if showStats && (accounting.Stats.Errored() || *statsInterval > 0) {

View File

@@ -127,7 +127,7 @@ func waitFor(fn func() bool) (ok bool) {
func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) { func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, error) {
fs.Debugf(f, "Mounting on %q", mountpoint) fs.Debugf(f, "Mounting on %q", mountpoint)
// Check the mountpoint - in Windows the mountpoint mustn't exist before the mount // Check the mountpoint - in Windows the mountpoint musn't exist before the mount
if runtime.GOOS != "windows" { if runtime.GOOS != "windows" {
fi, err := os.Stat(mountpoint) fi, err := os.Stat(mountpoint)
if err != nil { if err != nil {

View File

@@ -11,7 +11,6 @@ import (
"github.com/ncw/rclone/cmd" "github.com/ncw/rclone/cmd"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/cobra/doc" "github.com/spf13/cobra/doc"
"github.com/spf13/pflag"
) )
func init() { func init() {
@@ -51,10 +50,6 @@ rclone.org website.`,
base := strings.TrimSuffix(name, path.Ext(name)) base := strings.TrimSuffix(name, path.Ext(name))
return "/commands/" + strings.ToLower(base) + "/" return "/commands/" + strings.ToLower(base) + "/"
} }
// Hide all of the root entries flags
cmd.Root.Flags().VisitAll(func(flag *pflag.Flag) {
flag.Hidden = true
})
return doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler) return doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler)
}, },
} }

View File

@@ -2,7 +2,7 @@ package lshelp
// Help describes the common help for all the list commands // Help describes the common help for all the list commands
var Help = ` var Help = `
Any of the filtering options can be applied to this command. Any of the filtering options can be applied to this commmand.
There are several related list commands There are several related list commands

View File

@@ -70,7 +70,6 @@ output:
o - Original ID of underlying object o - Original ID of underlying object
m - MimeType of object if known m - MimeType of object if known
e - encrypted name e - encrypted name
T - tier of storage if known, eg "Hot" or "Cool"
So if you wanted the path, size and modification time, you would use So if you wanted the path, size and modification time, you would use
--format "pst", or maybe --format "tsp" to put the path last. --format "pst", or maybe --format "tsp" to put the path last.
@@ -165,8 +164,6 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
list.SetAbsolute(absolute) list.SetAbsolute(absolute)
var opt = operations.ListJSONOpt{ var opt = operations.ListJSONOpt{
NoModTime: true, NoModTime: true,
DirsOnly: dirsOnly,
FilesOnly: filesOnly,
Recurse: recurse, Recurse: recurse,
} }
@@ -192,14 +189,21 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
case 'o': case 'o':
list.AddOrigID() list.AddOrigID()
opt.ShowOrigIDs = true opt.ShowOrigIDs = true
case 'T':
list.AddTier()
default: default:
return errors.Errorf("Unknown format character %q", char) return errors.Errorf("Unknown format character %q", char)
} }
} }
return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error { return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
if item.IsDir {
if filesOnly {
return nil
}
} else {
if dirsOnly {
return nil
}
}
_, _ = fmt.Fprintln(out, list.Format(item)) _, _ = fmt.Fprintln(out, list.Format(item))
return nil return nil
}) })

View File

@@ -23,8 +23,6 @@ func init() {
commandDefintion.Flags().BoolVarP(&opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).") commandDefintion.Flags().BoolVarP(&opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
commandDefintion.Flags().BoolVarP(&opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.") commandDefintion.Flags().BoolVarP(&opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
commandDefintion.Flags().BoolVarP(&opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.") commandDefintion.Flags().BoolVarP(&opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
commandDefintion.Flags().BoolVarP(&opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
commandDefintion.Flags().BoolVarP(&opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
} }
var commandDefintion = &cobra.Command{ var commandDefintion = &cobra.Command{
@@ -57,10 +55,6 @@ If --no-modtime is specified then ModTime will be blank.
If --encrypted is not specified the Encrypted won't be emitted. If --encrypted is not specified the Encrypted won't be emitted.
If --dirs-only is not specified files in addition to directories are returned
If --files-only is not specified directories in addition to the files will be returned.
The Path field will only show folders below the remote path being listed. The Path field will only show folders below the remote path being listed.
If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt" If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"
will be "subfolder/file.txt", not "remote:path/subfolder/file.txt". will be "subfolder/file.txt", not "remote:path/subfolder/file.txt".

View File

@@ -3,7 +3,6 @@
package mount package mount
import ( import (
"context"
"os" "os"
"time" "time"
@@ -13,6 +12,7 @@ import (
"github.com/ncw/rclone/fs/log" "github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/vfs" "github.com/ncw/rclone/vfs"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
) )
// Dir represents a directory entry // Dir represents a directory entry
@@ -20,7 +20,7 @@ type Dir struct {
*vfs.Dir *vfs.Dir
} }
// Check interface satisfied // Check interface satsified
var _ fusefs.Node = (*Dir)(nil) var _ fusefs.Node = (*Dir)(nil)
// Attr updates the attributes of a directory // Attr updates the attributes of a directory
@@ -37,6 +37,8 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
a.Crtime = modTime a.Crtime = modTime
// FIXME include Valid so get some caching? // FIXME include Valid so get some caching?
// FIXME fs.Debugf(d.path, "Dir.Attr %+v", a) // FIXME fs.Debugf(d.path, "Dir.Attr %+v", a)
a.Size = 512
a.Blocks = 1
return nil return nil
} }

View File

@@ -3,7 +3,6 @@
package mount package mount
import ( import (
"context"
"io" "io"
"time" "time"
@@ -12,6 +11,7 @@ import (
"github.com/ncw/rclone/cmd/mountlib" "github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs/log" "github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/vfs" "github.com/ncw/rclone/vfs"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
) )
// File represents a file // File represents a file

View File

@@ -5,7 +5,6 @@
package mount package mount
import ( import (
"context"
"syscall" "syscall"
"bazil.org/fuse" "bazil.org/fuse"
@@ -16,6 +15,7 @@ import (
"github.com/ncw/rclone/vfs" "github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags" "github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
) )
// FS represents the top level filing system // FS represents the top level filing system
@@ -24,7 +24,7 @@ type FS struct {
f fs.Fs f fs.Fs
} }
// Check interface satisfied // Check interface satistfied
var _ fusefs.FS = (*FS)(nil) var _ fusefs.FS = (*FS)(nil)
// NewFS makes a new FS // NewFS makes a new FS
@@ -46,7 +46,7 @@ func (f *FS) Root() (node fusefs.Node, err error) {
return &Dir{root}, nil return &Dir{root}, nil
} }
// Check interface satisfied // Check interface satsified
var _ fusefs.FSStatfser = (*FS)(nil) var _ fusefs.FSStatfser = (*FS)(nil)
// Statfs is called to obtain file system metadata. // Statfs is called to obtain file system metadata.

View File

@@ -3,13 +3,13 @@
package mount package mount
import ( import (
"context"
"io" "io"
"bazil.org/fuse" "bazil.org/fuse"
fusefs "bazil.org/fuse/fs" fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/fs/log" "github.com/ncw/rclone/fs/log"
"github.com/ncw/rclone/vfs" "github.com/ncw/rclone/vfs"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
) )
// FileHandle is an open for read file handle on a File // FileHandle is an open for read file handle on a File

View File

@@ -19,7 +19,7 @@ If source:path is a file or directory then it moves it to a file or
directory named dest:path. directory named dest:path.
This can be used to rename files or upload single files to other than This can be used to rename files or upload single files to other than
their existing name. If the source is a directory then it acts exactly their existing name. If the source is a directory then it acts exacty
like the move command. like the move command.
So So

View File

@@ -361,7 +361,7 @@ func (u *UI) Draw() error {
Linef(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Total usage: %v, Objects: %d%s", fs.SizeSuffix(size), count, message) Linef(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Total usage: %v, Objects: %d%s", fs.SizeSuffix(size), count, message)
} }
// Show the box on top if required // Show the box on top if requred
if u.showBox { if u.showBox {
u.Box() u.Box()
} }

View File

@@ -82,7 +82,7 @@ var (
progressMu sync.Mutex progressMu sync.Mutex
) )
// printProgress prints the progress with an optional log // printProgress prings the progress with an optional log
func printProgress(logMessage string) { func printProgress(logMessage string) {
progressMu.Lock() progressMu.Lock()
defer progressMu.Unlock() defer progressMu.Unlock()

View File

@@ -1,3 +1,5 @@
// +build go1.8
package dlna package dlna
import ( import (
@@ -20,11 +22,11 @@ import (
var ( var (
dlnaServer *server dlnaServer *server
testURL string
) )
const ( const (
testBindAddress = "localhost:0" testBindAddress = "localhost:51777"
testURL = "http://" + testBindAddress + "/"
) )
func startServer(t *testing.T, f fs.Fs) { func startServer(t *testing.T, f fs.Fs) {
@@ -32,7 +34,6 @@ func startServer(t *testing.T, f fs.Fs) {
opt.ListenAddr = testBindAddress opt.ListenAddr = testBindAddress
dlnaServer = newServer(f, &opt) dlnaServer = newServer(f, &opt)
assert.NoError(t, dlnaServer.Serve()) assert.NoError(t, dlnaServer.Serve())
testURL = "http://" + dlnaServer.HTTPConn.Addr().String() + "/"
} }
func TestInit(t *testing.T) { func TestInit(t *testing.T) {

View File

@@ -78,7 +78,6 @@ func newServer(f fs.Fs, opt *ftpopt.Options) (*server, error) {
}, },
Hostname: host, Hostname: host,
Port: portNum, Port: portNum,
PublicIp: opt.PublicIP,
PassivePorts: opt.PassivePorts, PassivePorts: opt.PassivePorts,
Auth: &Auth{ Auth: &Auth{
BasicUser: opt.BasicUser, BasicUser: opt.BasicUser,
@@ -156,7 +155,7 @@ func (f *DriverFactory) NewDriver() (ftp.Driver, error) {
}, nil }, nil
} }
//Driver implementation of ftp server //Driver impletation of ftp server
type Driver struct { type Driver struct {
vfs *vfs.VFS vfs *vfs.VFS
lock sync.Mutex lock sync.Mutex
@@ -379,7 +378,7 @@ func (d *Driver) PutFile(path string, data io.Reader, appendData bool) (n int64,
return bytes, nil return bytes, nil
} }
//FileInfo struct to hold file info for ftp server //FileInfo struct ot hold file infor for ftp server
type FileInfo struct { type FileInfo struct {
os.FileInfo os.FileInfo
@@ -388,7 +387,7 @@ type FileInfo struct {
group uint32 group uint32
} }
//Mode return mode of file. //Mode return êrm mode of file.
func (f *FileInfo) Mode() os.FileMode { func (f *FileInfo) Mode() os.FileMode {
return f.mode return f.mode
} }
@@ -408,7 +407,7 @@ func (f *FileInfo) Group() string {
str := fmt.Sprint(f.group) str := fmt.Sprint(f.group)
g, err := user.LookupGroupId(str) g, err := user.LookupGroupId(str)
if err != nil { if err != nil {
return str //Group not found default to numerical value return str //Group not found default to numrical value
} }
return g.Name return g.Name
} }

View File

@@ -16,7 +16,6 @@ var (
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *ftpopt.Options) { func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *ftpopt.Options) {
rc.AddOption("ftp", &Opt) rc.AddOption("ftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.") flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
flags.StringVarP(flagSet, &Opt.PublicIP, prefix+"public-ip", "", Opt.PublicIP, "Public IP address to advertise for passive connections.")
flags.StringVarP(flagSet, &Opt.PassivePorts, prefix+"passive-port", "", Opt.PassivePorts, "Passive port range to use.") flags.StringVarP(flagSet, &Opt.PassivePorts, prefix+"passive-port", "", Opt.PassivePorts, "Passive port range to use.")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.") flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)") flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication. (empty value allow every password)")

View File

@@ -24,7 +24,6 @@ You can set a single username and password with the --user and --pass flags.
type Options struct { type Options struct {
//TODO add more options //TODO add more options
ListenAddr string // Port to listen on ListenAddr string // Port to listen on
PublicIP string // Passive ports range
PassivePorts string // Passive ports range PassivePorts string // Passive ports range
BasicUser string // single username for basic auth if not using Htpasswd BasicUser string // single username for basic auth if not using Htpasswd
BasicPass string // password for BasicUser BasicPass string // password for BasicUser
@@ -33,7 +32,6 @@ type Options struct {
// DefaultOpt is the default values used for Options // DefaultOpt is the default values used for Options
var DefaultOpt = Options{ var DefaultOpt = Options{
ListenAddr: "localhost:2121", ListenAddr: "localhost:2121",
PublicIP: "",
PassivePorts: "30000-32000", PassivePorts: "30000-32000",
BasicUser: "anonymous", BasicUser: "anonymous",
BasicPass: "", BasicPass: "",

View File

@@ -1,8 +1,11 @@
// +build go1.8
package http package http
import ( import (
"flag" "flag"
"io/ioutil" "io/ioutil"
"net"
"net/http" "net/http"
"strings" "strings"
"testing" "testing"
@@ -20,11 +23,11 @@ import (
var ( var (
updateGolden = flag.Bool("updategolden", false, "update golden files for regression test") updateGolden = flag.Bool("updategolden", false, "update golden files for regression test")
httpServer *server httpServer *server
testURL string
) )
const ( const (
testBindAddress = "localhost:0" testBindAddress = "localhost:51777"
testURL = "http://" + testBindAddress + "/"
) )
func startServer(t *testing.T, f fs.Fs) { func startServer(t *testing.T, f fs.Fs) {
@@ -32,14 +35,13 @@ func startServer(t *testing.T, f fs.Fs) {
opt.ListenAddr = testBindAddress opt.ListenAddr = testBindAddress
httpServer = newServer(f, &opt) httpServer = newServer(f, &opt)
assert.NoError(t, httpServer.Serve()) assert.NoError(t, httpServer.Serve())
testURL = httpServer.Server.URL()
// try to connect to the test server // try to connect to the test server
pause := time.Millisecond pause := time.Millisecond
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
resp, err := http.Head(testURL) conn, err := net.Dial("tcp", testBindAddress)
if err == nil { if err == nil {
_ = resp.Body.Close() _ = conn.Close()
return return
} }
// t.Logf("couldn't connect, sleeping for %v: %v", pause, err) // t.Logf("couldn't connect, sleeping for %v: %v", pause, err)
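
The "localhost:0" variant asks the kernel for any free port and reads the bound address back, which avoids clashes on a hard-coded test port. A self-contained sketch of that pattern:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // Port 0 tells the kernel to pick any free port.
        l, err := net.Listen("tcp", "localhost:0")
        if err != nil {
            panic(err)
        }
        defer func() { _ = l.Close() }()
        // The listener knows which port it was actually given.
        fmt.Println("http://" + l.Addr().String() + "/")
    }
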

View File

@@ -0,0 +1,21 @@
// HTTP parts go1.8+
//+build go1.8
package httplib
import (
"net/http"
"time"
)
// Initialise the http.Server for go1.8 and later
func initServer(s *http.Server) {
s.ReadHeaderTimeout = 10 * time.Second // time to send the headers
s.IdleTimeout = 60 * time.Second // time to keep idle connections open
}
// closeServer closes the server in a non graceful way
func closeServer(s *http.Server) error {
return s.Close()
}

View File

@@ -0,0 +1,18 @@
// HTTP parts pre go1.8
//+build !go1.8
package httplib
import (
"net/http"
)
// Initialise the http.Server for pre go1.8
func initServer(s *http.Server) {
}
// closeServer closes the server in a non graceful way
func closeServer(s *http.Server) error {
return nil
}

View File

@@ -180,17 +180,17 @@ func NewServer(handler http.Handler, opt *Options) *Server {
// FIXME make a transport? // FIXME make a transport?
s.httpServer = &http.Server{ s.httpServer = &http.Server{
Addr: s.Opt.ListenAddr, Addr: s.Opt.ListenAddr,
Handler: handler, Handler: handler,
ReadTimeout: s.Opt.ServerReadTimeout, ReadTimeout: s.Opt.ServerReadTimeout,
WriteTimeout: s.Opt.ServerWriteTimeout, WriteTimeout: s.Opt.ServerWriteTimeout,
MaxHeaderBytes: s.Opt.MaxHeaderBytes, MaxHeaderBytes: s.Opt.MaxHeaderBytes,
ReadHeaderTimeout: 10 * time.Second, // time to send the headers
IdleTimeout: 60 * time.Second, // time to keep idle connections open
TLSConfig: &tls.Config{ TLSConfig: &tls.Config{
MinVersion: tls.VersionTLS10, // disable SSL v3.0 and earlier MinVersion: tls.VersionTLS10, // disable SSL v3.0 and earlier
}, },
} }
// go version specific initialisation
initServer(s.httpServer)
if s.Opt.ClientCA != "" { if s.Opt.ClientCA != "" {
if !s.useSSL { if !s.useSSL {
@@ -267,7 +267,7 @@ func (s *Server) Wait() {
// Close shuts the running server down // Close shuts the running server down
func (s *Server) Close() { func (s *Server) Close() {
err := s.httpServer.Close() err := closeServer(s.httpServer)
if err != nil { if err != nil {
log.Printf("Error on closing HTTP server: %v", err) log.Printf("Error on closing HTTP server: %v", err)
return return

View File

@@ -11,7 +11,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
// GetTemplate returns the HTML template for serving directories via HTTP // GetTemplate eturns the HTML template for serving directories via HTTP
func GetTemplate() (tpl *template.Template, err error) { func GetTemplate() (tpl *template.Template, err error) {
templateFile, err := Assets.Open("index.html") templateFile, err := Assets.Open("index.html")
if err != nil { if err != nil {

View File

@@ -17,7 +17,8 @@ import (
) )
const ( const (
testBindAddress = "localhost:0" testBindAddress = "localhost:51779"
testURL = "http://" + testBindAddress + "/"
resticSource = "../../../../../restic/restic" resticSource = "../../../../../restic/restic"
) )
@@ -61,7 +62,7 @@ func TestRestic(t *testing.T) {
} }
cmd := exec.Command("go", args...) cmd := exec.Command("go", args...)
cmd.Env = append(os.Environ(), cmd.Env = append(os.Environ(),
"RESTIC_TEST_REST_REPOSITORY=rest:"+w.Server.URL()+path, "RESTIC_TEST_REST_REPOSITORY=rest:"+testURL+path,
) )
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
if len(out) != 0 { if len(out) != 0 {

View File

@@ -3,12 +3,12 @@ package serve
import ( import (
"errors" "errors"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/serve/dlna" "github.com/ncw/rclone/cmd/serve/dlna"
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/cmd/serve/ftp" "github.com/ncw/rclone/cmd/serve/ftp"
"github.com/ncw/rclone/cmd/serve/http" "github.com/ncw/rclone/cmd/serve/http"
"github.com/ncw/rclone/cmd/serve/restic" "github.com/ncw/rclone/cmd/serve/restic"
"github.com/ncw/rclone/cmd/serve/sftp"
"github.com/ncw/rclone/cmd/serve/webdav" "github.com/ncw/rclone/cmd/serve/webdav"
"github.com/spf13/cobra" "github.com/spf13/cobra"
) )
@@ -27,9 +27,6 @@ func init() {
if ftp.Command != nil { if ftp.Command != nil {
Command.AddCommand(ftp.Command) Command.AddCommand(ftp.Command)
} }
if sftp.Command != nil {
Command.AddCommand(sftp.Command)
}
cmd.Root.AddCommand(Command) cmd.Root.AddCommand(Command)
} }

View File

@@ -1,254 +0,0 @@
// +build !plan9
package sftp
import (
"fmt"
"io"
"net"
"regexp"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/vfs"
"github.com/pkg/errors"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
func describeConn(c interface {
RemoteAddr() net.Addr
LocalAddr() net.Addr
}) string {
return fmt.Sprintf("serve sftp %s->%s", c.RemoteAddr(), c.LocalAddr())
}
// Return the exit status of the command
type exitStatus struct {
RC uint32
}
// The incoming exec command
type execCommand struct {
Command string
}
var shellUnEscapeRegex = regexp.MustCompile(`\\(.)`)
// Unescape a string that was escaped by rclone
func shellUnEscape(str string) string {
str = strings.Replace(str, "'\n'", "\n", -1)
str = shellUnEscapeRegex.ReplaceAllString(str, `$1`)
return str
}
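
A self-contained sketch of shellUnEscape round-tripping one of the values from the test file further down:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    var shellUnEscapeRegex = regexp.MustCompile(`\\(.)`)

    // shellUnEscape reverses rclone's shell escaping: quoted newlines are
    // restored first, then each backslash escape collapses to its character.
    func shellUnEscape(str string) string {
        str = strings.Replace(str, "'\n'", "\n", -1)
        return shellUnEscapeRegex.ReplaceAllString(str, `$1`)
    }

    func main() {
        fmt.Println(shellUnEscape(`\$\(rm\ -rf\ /\)`)) // prints: $(rm -rf /)
    }
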
// Info about the current connection
type conn struct {
vfs *vfs.VFS
f fs.Fs
handlers sftp.Handlers
what string
}
// execCommand implements an extremely limited number of commands to
// interoperate with the rclone sftp backend
func (c *conn) execCommand(out io.Writer, command string) (err error) {
binary, args := command, ""
space := strings.Index(command, " ")
if space >= 0 {
binary = command[:space]
args = strings.TrimLeft(command[space+1:], " ")
}
args = shellUnEscape(args)
fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)
switch binary {
case "df":
about := c.f.Features().About
if about == nil {
return errors.New("df not supported")
}
usage, err := about()
if err != nil {
return errors.Wrap(err, "About failed")
}
total, used, free := int64(-1), int64(-1), int64(-1)
if usage.Total != nil {
total = *usage.Total / 1024
}
if usage.Used != nil {
used = *usage.Used / 1024
}
if usage.Free != nil {
free = *usage.Free / 1024
}
perc := int64(0)
if total > 0 && used >= 0 {
perc = (100 * used) / total
}
_, err = fmt.Fprintf(out, ` Filesystem 1K-blocks Used Available Use%% Mounted on
/dev/root %d %d %d %d%% /
`, total, used, free, perc)
if err != nil {
return errors.Wrap(err, "send output failed")
}
case "md5sum", "sha1sum":
ht := hash.MD5
if binary == "sha1sum" {
ht = hash.SHA1
}
node, err := c.vfs.Stat(args)
if err != nil {
return errors.Wrapf(err, "hash failed finding file %q", args)
}
if node.IsDir() {
return errors.New("can't hash directory")
}
o, ok := node.DirEntry().(fs.ObjectInfo)
if !ok {
return errors.New("unexpected non file")
}
hash, err := o.Hash(ht)
if err != nil {
return errors.Wrap(err, "hash failed")
}
_, err = fmt.Fprintf(out, "%s %s\n", hash, args)
if err != nil {
return errors.Wrap(err, "send output failed")
}
case "echo":
// special cases for rclone command detection
switch args {
case "'abc' | md5sum":
if c.f.Hashes().Contains(hash.MD5) {
_, err = fmt.Fprintf(out, "0bee89b07a248e27c83fc3d5951213c1 -\n")
if err != nil {
return errors.Wrap(err, "send output failed")
}
} else {
return errors.New("md5 hash not supported")
}
case "'abc' | sha1sum":
if c.f.Hashes().Contains(hash.SHA1) {
_, err = fmt.Fprintf(out, "03cfd743661f07975fa2f1220c5194cbaff48451 -\n")
if err != nil {
return errors.Wrap(err, "send output failed")
}
} else {
return errors.New("sha1 hash not supported")
}
default:
_, err = fmt.Fprintf(out, "%s\n", args)
if err != nil {
return errors.Wrap(err, "send output failed")
}
}
default:
return errors.Errorf("%q not implemented\n", command)
}
return nil
}
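
The magic strings in the echo cases above are just the MD5 and SHA1 of "abc" plus the trailing newline that echo appends, as a quick local check shows:

    package main

    import (
        "crypto/md5"
        "crypto/sha1"
        "fmt"
    )

    func main() {
        data := []byte("abc\n") // what `echo 'abc'` pipes into md5sum/sha1sum
        fmt.Printf("%x -\n", md5.Sum(data))  // 0bee89b07a248e27c83fc3d5951213c1 -
        fmt.Printf("%x -\n", sha1.Sum(data)) // 03cfd743661f07975fa2f1220c5194cbaff48451 -
    }
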
// handle a new incoming channel request
func (c *conn) handleChannel(newChannel ssh.NewChannel) {
fs.Debugf(c.what, "Incoming channel: %s\n", newChannel.ChannelType())
if newChannel.ChannelType() != "session" {
err := newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
fs.Debugf(c.what, "Unknown channel type: %s\n", newChannel.ChannelType())
if err != nil {
fs.Errorf(c.what, "Failed to reject unknown channel: %v", err)
}
return
}
channel, requests, err := newChannel.Accept()
if err != nil {
fs.Errorf(c.what, "could not accept channel: %v", err)
return
}
defer func() {
err := channel.Close()
if err != nil {
fs.Debugf(c.what, "Failed to close channel: %v", err)
}
}()
fs.Debugf(c.what, "Channel accepted\n")
isSFTP := make(chan bool, 1)
var command execCommand
// Handle out-of-band requests
go func(in <-chan *ssh.Request) {
for req := range in {
fs.Debugf(c.what, "Request: %v\n", req.Type)
ok := false
var subSystemIsSFTP bool
var reply []byte
switch req.Type {
case "subsystem":
fs.Debugf(c.what, "Subsystem: %s\n", req.Payload[4:])
if string(req.Payload[4:]) == "sftp" {
ok = true
subSystemIsSFTP = true
}
case "exec":
err := ssh.Unmarshal(req.Payload, &command)
if err != nil {
fs.Errorf(c.what, "ignoring bad exec command: %v", err)
} else {
ok = true
subSystemIsSFTP = false
}
}
fs.Debugf(c.what, " - accepted: %v\n", ok)
err = req.Reply(ok, reply)
if err != nil {
fs.Errorf(c.what, "Failed to Reply to request: %v", err)
return
}
if ok {
// Wake up main routine after we have responded
isSFTP <- subSystemIsSFTP
}
}
}(requests)
// Wait for either subsystem "sftp" or "exec" request
if <-isSFTP {
fs.Debugf(c.what, "Starting SFTP server")
server := sftp.NewRequestServer(channel, c.handlers)
defer func() {
err := server.Close()
if err != nil {
fs.Debugf(c.what, "Failed to close server: %v", err)
}
}()
err = server.Serve()
if err == io.EOF || err == nil {
fs.Debugf(c.what, "exited session")
} else {
fs.Errorf(c.what, "completed with error: %v", err)
}
} else {
var rc = uint32(0)
err := c.execCommand(channel, command.Command)
if err != nil {
rc = 1
_, errPrint := fmt.Fprintf(channel.Stderr(), "%v\n", err)
if errPrint != nil {
fs.Errorf(c.what, "Failed to write to stderr: %v", errPrint)
}
fs.Debugf(c.what, "command %q failed with error: %v", command.Command, err)
}
_, err = channel.SendRequest("exit-status", false, ssh.Marshal(exitStatus{RC: rc}))
if err != nil {
fs.Errorf(c.what, "Failed to send exit status: %v", err)
}
}
}
// Service each incoming channel in its own goroutine
func (c *conn) handleChannels(chans <-chan ssh.NewChannel) {
for newChannel := range chans {
go c.handleChannel(newChannel)
}
}

View File

@@ -1,25 +0,0 @@
// +build !plan9
package sftp
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestShellEscape(t *testing.T) {
for i, test := range []struct {
unescaped, escaped string
}{
{"", ""},
{"/this/is/harmless", "/this/is/harmless"},
{"$(rm -rf /)", "\\$\\(rm\\ -rf\\ /\\)"},
{"/test/\n", "/test/'\n'"},
{":\"'", ":\\\"\\'"},
} {
got := shellUnEscape(test.escaped)
assert.Equal(t, test.unescaped, got, fmt.Sprintf("Test %d unescaped = %q", i, test.unescaped))
}
}

View File

@@ -1,154 +0,0 @@
// +build !plan9
package sftp
import (
"io"
"os"
"syscall"
"time"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/vfs"
"github.com/pkg/sftp"
)
// vfsHandler converts the VFS to be served by SFTP
type vfsHandler struct {
*vfs.VFS
}
// newVFSHandler returns a Handlers object with the VFS handler for all operations.
func newVFSHandler(vfs *vfs.VFS) (sftp.Handlers, error) {
v := vfsHandler{VFS: vfs}
return sftp.Handlers{
FileGet: v,
FilePut: v,
FileCmd: v,
FileList: v,
}, nil
}
func (v vfsHandler) Fileread(r *sftp.Request) (io.ReaderAt, error) {
file, err := v.OpenFile(r.Filepath, os.O_RDONLY, 0777)
if err != nil {
return nil, err
}
return file, nil
}
func (v vfsHandler) Filewrite(r *sftp.Request) (io.WriterAt, error) {
file, err := v.OpenFile(r.Filepath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0777)
if err != nil {
return nil, err
}
return file, nil
}
func (v vfsHandler) Filecmd(r *sftp.Request) error {
switch r.Method {
case "Setstat":
node, err := v.Stat(r.Filepath)
if err != nil {
return err
}
attr := r.Attributes()
if attr.Mtime != 0 {
modTime := time.Unix(int64(attr.Mtime), 0)
err := node.SetModTime(modTime)
if err != nil {
return err
}
}
return nil
case "Rename":
err := v.Rename(r.Filepath, r.Target)
if err != nil {
return err
}
case "Rmdir", "Remove":
node, err := v.Stat(r.Filepath)
if err != nil {
return err
}
err = node.Remove()
if err != nil {
return err
}
case "Mkdir":
dir, leaf, err := v.StatParent(r.Filepath)
if err != nil {
return err
}
_, err = dir.Mkdir(leaf)
if err != nil {
return err
}
case "Symlink":
// FIXME
// _, err := v.fetch(r.Filepath)
// if err != nil {
// return err
// }
// link := newMemFile(r.Target, false)
// link.symlink = r.Filepath
// v.files[r.Target] = link
}
return nil
}
type listerat []os.FileInfo
// Modeled after strings.Reader's ReadAt() implementation
func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) {
var n int
if offset >= int64(len(f)) {
return 0, io.EOF
}
n = copy(ls, f[offset:])
if n < len(ls) {
return n, io.EOF
}
return n, nil
}
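
ListAt follows the io.ReaderAt convention: it fills as much of the slice as it can from the given offset and signals exhaustion with io.EOF. A caller-side sketch (assuming entries is a listerat and buf a reusable page; error handling beyond io.EOF omitted):

    buf := make([]os.FileInfo, 2)
    var offset int64
    for {
        n, err := entries.ListAt(buf, offset)
        for _, fi := range buf[:n] {
            fmt.Println(fi.Name()) // consume the page
        }
        offset += int64(n)
        if err == io.EOF {
            break // listing exhausted
        }
    }
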
func (v vfsHandler) Filelist(r *sftp.Request) (l sftp.ListerAt, err error) {
var node vfs.Node
var handle vfs.Handle
switch r.Method {
case "List":
node, err = v.Stat(r.Filepath)
if err != nil {
return nil, err
}
if !node.IsDir() {
return nil, syscall.ENOTDIR
}
handle, err = node.Open(os.O_RDONLY)
if err != nil {
return nil, err
}
defer fs.CheckClose(handle, &err)
fis, err := handle.Readdir(-1)
if err != nil {
return nil, err
}
return listerat(fis), nil
case "Stat":
node, err = v.Stat(r.Filepath)
if err != nil {
return nil, err
}
return listerat([]os.FileInfo{node}), nil
case "Readlink":
// FIXME
// if file.symlink != "" {
// file, err = v.fetch(file.symlink)
// if err != nil {
// return nil, err
// }
// }
// return listerat([]os.FileInfo{file}), nil
}
return nil, nil
}

View File

@@ -1,282 +0,0 @@
// +build !plan9
package sftp
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/subtle"
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"path/filepath"
"strings"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/lib/env"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/pkg/errors"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
// server contains everything to run the server
type server struct {
f fs.Fs
opt Options
vfs *vfs.VFS
config *ssh.ServerConfig
handlers sftp.Handlers
listener net.Listener
waitChan chan struct{} // for waiting on the listener to close
}
func newServer(f fs.Fs, opt *Options) *server {
s := &server{
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
opt: *opt,
waitChan: make(chan struct{}),
}
return s
}
func (s *server) acceptConnections() {
for {
nConn, err := s.listener.Accept()
if err != nil {
if strings.Contains(err.Error(), "use of closed network connection") {
return
}
fs.Errorf(nil, "Failed to accept incoming connection: %v", err)
continue
}
what := describeConn(nConn)
// Before use, a handshake must be performed on the incoming net.Conn.
sshConn, chans, reqs, err := ssh.NewServerConn(nConn, s.config)
if err != nil {
fs.Errorf(what, "SSH login failed: %v", err)
continue
}
fs.Infof(what, "SSH login from %s using %s", sshConn.User(), sshConn.ClientVersion())
// Discard all global out-of-band Requests
go ssh.DiscardRequests(reqs)
c := &conn{
vfs: s.vfs,
f: s.f,
handlers: s.handlers,
what: what,
}
// Accept all channels
go c.handleChannels(chans)
}
}
// Based on example server code from golang.org/x/crypto/ssh and server_standalone
func (s *server) serve() (err error) {
var authorizedKeysMap map[string]struct{}
// Load the authorized keys
if s.opt.AuthorizedKeys != "" {
authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
// If user set the flag away from the default then report an error
if err != nil && s.opt.AuthorizedKeys != DefaultOpt.AuthorizedKeys {
return err
}
fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
}
if !s.opt.NoAuth && len(authorizedKeysMap) == 0 && s.opt.User == "" && s.opt.Pass == "" {
return errors.New("no authorization found, use --user/--pass or --authorized-keys or --no-auth")
}
// An SSH server is represented by a ServerConfig, which holds
// certificate details and handles authentication of ServerConns.
s.config = &ssh.ServerConfig{
ServerVersion: "SSH-2.0-" + fs.Config.UserAgent,
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
fs.Debugf(describeConn(c), "Password login attempt for %s", c.User())
if s.opt.User != "" && s.opt.Pass != "" {
userOK := subtle.ConstantTimeCompare([]byte(c.User()), []byte(s.opt.User))
passOK := subtle.ConstantTimeCompare(pass, []byte(s.opt.Pass))
if (userOK & passOK) == 1 {
return nil, nil
}
}
return nil, fmt.Errorf("password rejected for %q", c.User())
},
PublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {
fs.Debugf(describeConn(c), "Public key login attempt for %s", c.User())
if _, ok := authorizedKeysMap[string(pubKey.Marshal())]; ok {
return &ssh.Permissions{
// Record the public key used for authentication.
Extensions: map[string]string{
"pubkey-fp": ssh.FingerprintSHA256(pubKey),
},
}, nil
}
return nil, fmt.Errorf("unknown public key for %q", c.User())
},
AuthLogCallback: func(conn ssh.ConnMetadata, method string, err error) {
status := "OK"
if err != nil {
status = err.Error()
}
fs.Debugf(describeConn(conn), "ssh auth %q from %q: %s", method, conn.ClientVersion(), status)
},
NoClientAuth: s.opt.NoAuth,
}
// Load the private key, from the cache if not explicitly configured
keyPath := s.opt.Key
cachePath := filepath.Join(config.CacheDir, "serve-sftp")
if keyPath == "" {
keyPath = filepath.Join(cachePath, "id_rsa")
}
private, err := loadPrivateKey(keyPath)
if err != nil && s.opt.Key == "" {
fs.Debugf(nil, "Failed to load %q: %v", keyPath, err)
// If loading a cached key failed, make the keys and retry
err = os.MkdirAll(cachePath, 0700)
if err != nil {
return errors.Wrap(err, "failed to create cache path")
}
const bits = 2048
fs.Logf(nil, "Generating %d bit key pair at %q", bits, keyPath)
err = makeSSHKeyPair(bits, keyPath+".pub", keyPath)
if err != nil {
return errors.Wrap(err, "failed to create SSH key pair")
}
// reload the new keys
private, err = loadPrivateKey(keyPath)
}
if err != nil {
return err
}
fs.Debugf(nil, "Loaded private key from %q", keyPath)
s.config.AddHostKey(private)
// Once a ServerConfig has been configured, connections can be
// accepted.
s.listener, err = net.Listen("tcp", s.opt.ListenAddr)
if err != nil {
log.Fatal("failed to listen for connection", err)
}
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
s.handlers, err = newVFSHandler(s.vfs)
if err != nil {
return errors.Wrap(err, "serve sftp: failed to create fs")
}
go s.acceptConnections()
return nil
}
// Addr returns the address the server is listening on
func (s *server) Addr() string {
return s.listener.Addr().String()
}
// Serve runs the sftp server in the background.
//
// Use s.Close() and s.Wait() to shutdown server
func (s *server) Serve() error {
err := s.serve()
if err != nil {
return err
}
return nil
}
// Wait blocks while the listener is open.
func (s *server) Wait() {
<-s.waitChan
}
// Close shuts the running server down
func (s *server) Close() {
err := s.listener.Close()
if err != nil {
fs.Errorf(nil, "Error on closing SFTP server: %v", err)
return
}
close(s.waitChan)
}
func loadPrivateKey(keyPath string) (ssh.Signer, error) {
privateBytes, err := ioutil.ReadFile(keyPath)
if err != nil {
return nil, errors.Wrap(err, "failed to load private key")
}
private, err := ssh.ParsePrivateKey(privateBytes)
if err != nil {
return nil, errors.Wrap(err, "failed to parse private key")
}
return private, nil
}
// Public key authentication is done by comparing
// the public key of a received connection
// with the entries in the authorized_keys file.
func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string]struct{}, err error) {
authorizedKeysBytes, err := ioutil.ReadFile(authorizedKeysPath)
if err != nil {
return nil, errors.Wrap(err, "failed to load authorized keys")
}
authorizedKeysMap = make(map[string]struct{})
for len(authorizedKeysBytes) > 0 {
pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
if err != nil {
return nil, errors.Wrap(err, "failed to parse authorized keys")
}
authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
authorizedKeysBytes = bytes.TrimSpace(rest)
}
return authorizedKeysMap, nil
}
// makeSSHKeyPair make a pair of public and private keys for SSH access.
// Public key is encoded in the format for inclusion in an OpenSSH authorized_keys file.
// Private Key generated is PEM encoded
//
// Originally from: https://stackoverflow.com/a/34347463/164234
func makeSSHKeyPair(bits int, pubKeyPath, privateKeyPath string) (err error) {
privateKey, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return err
}
// generate and write private key as PEM
privateKeyFile, err := os.OpenFile(privateKeyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer fs.CheckClose(privateKeyFile, &err)
privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil {
return err
}
// generate and write public key
pub, err := ssh.NewPublicKey(&privateKey.PublicKey)
if err != nil {
return err
}
return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0644)
}

View File

@@ -1,101 +0,0 @@
// Package sftp implements an SFTP server to serve an rclone VFS
// +build !plan9
package sftp
import (
"github.com/ncw/rclone/cmd"
"github.com/ncw/rclone/fs/config/flags"
"github.com/ncw/rclone/fs/rc"
"github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// Options contains options for the http Server
type Options struct {
ListenAddr string // Port to listen on
Key string // Path to private key
AuthorizedKeys string // Path to authorized keys file
User string // single username
Pass string // password for user
NoAuth bool // allow no authentication on connections
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
ListenAddr: "localhost:2022",
AuthorizedKeys: "~/.ssh/authorized_keys",
}
// Opt is options set by command line flags
var Opt = DefaultOpt
// AddFlags adds flags for the sftp
func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
rc.AddOption("sftp", &Opt)
flags.StringVarP(flagSet, &Opt.ListenAddr, "addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to.")
flags.StringVarP(flagSet, &Opt.Key, "key", "", Opt.Key, "SSH private key file (leave blank to auto generate)")
flags.StringVarP(flagSet, &Opt.AuthorizedKeys, "authorized-keys", "", Opt.AuthorizedKeys, "Authorized keys file")
flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication.")
flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set.")
}
func init() {
vfsflags.AddFlags(Command.Flags())
AddFlags(Command.Flags(), &Opt)
}
// Command definition for cobra
var Command = &cobra.Command{
Use: "sftp remote:path",
Short: `Serve the remote over SFTP.`,
Long: `rclone serve sftp implements an SFTP server to serve the remote
over SFTP. This can be used with an SFTP client or you can make a
remote of type sftp to use with it.
You can use the filter flags (eg --include, --exclude) to control what
is served.
The server will log errors. Use -v to see access logs.
--bwlimit will be respected for file transfers. Use --stats to
control the stats printing.
You must provide some means of authentication, either with --user/--pass,
an authorized keys file (specify location with --authorized-keys - the
default is the same as ssh) or set the --no-auth flag for no
authentication when logging in.
Note that this also implements a small number of shell commands so
that it can provide md5sum/sha1sum/df information for the rclone sftp
backend. This means that it can support SHA1SUMs, MD5SUMs and the
about command when paired with the rclone sftp backend.
If you don't supply a --key then rclone will generate one and cache it
for later use.
By default the server binds to localhost:2022 - if you want it to be
reachable externally then supply "--addr :2022" for example.
Note that the default of "--vfs-cache-mode off" is fine for the rclone
sftp backend, but it may not be with other SFTP clients.
` + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
cmd.Run(false, true, command, func() error {
s := newServer(f, &Opt)
err := s.Serve()
if err != nil {
return err
}
s.Wait()
return nil
})
},
}
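
For example, a hypothetical session serving a remote and then connecting with a stock OpenSSH sftp client might look like:

    rclone serve sftp --user me --pass secret remote:path
    sftp -P 2022 me@localhost

(2022 being the default port from DefaultOpt above; the user, password and remote names are placeholders).
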

View File

@@ -1,94 +0,0 @@
// Serve sftp tests set up a server and run the integration tests
// for the sftp remote against it.
//
// We skip tests on platforms with troublesome character mappings
//+build !windows,!darwin,!plan9
package sftp
import (
"os"
"os/exec"
"strings"
"testing"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fstest"
"github.com/pkg/sftp"
"github.com/stretchr/testify/assert"
)
const (
testBindAddress = "localhost:0"
testUser = "testuser"
testPass = "testpass"
)
// check interfaces
var (
_ sftp.FileReader = vfsHandler{}
_ sftp.FileWriter = vfsHandler{}
_ sftp.FileCmder = vfsHandler{}
_ sftp.FileLister = vfsHandler{}
)
// TestSftp runs the sftp server then runs the unit tests for the
// sftp remote against it.
func TestSftp(t *testing.T) {
fstest.Initialise()
fremote, _, clean, err := fstest.RandomRemote(*fstest.RemoteName, *fstest.SubDir)
assert.NoError(t, err)
defer clean()
err = fremote.Mkdir("")
assert.NoError(t, err)
opt := DefaultOpt
opt.ListenAddr = testBindAddress
opt.User = testUser
opt.Pass = testPass
// Start the server
w := newServer(fremote, &opt)
assert.NoError(t, w.serve())
defer func() {
w.Close()
w.Wait()
}()
// Change directory to run the tests
err = os.Chdir("../../../backend/sftp")
assert.NoError(t, err, "failed to cd to sftp backend")
// Run the sftp tests with an on-the-fly remote
args := []string{"test"}
if testing.Verbose() {
args = append(args, "-v")
}
if *fstest.Verbose {
args = append(args, "-verbose")
}
args = append(args, "-remote", "sftptest:")
cmd := exec.Command("go", args...)
addr := w.Addr()
colon := strings.LastIndex(addr, ":")
if colon < 0 {
panic("need a : in the address: " + addr)
}
host, port := addr[:colon], addr[colon+1:]
cmd.Env = append(os.Environ(),
"RCLONE_CONFIG_SFTPTEST_TYPE=sftp",
"RCLONE_CONFIG_SFTPTEST_HOST="+host,
"RCLONE_CONFIG_SFTPTEST_PORT="+port,
"RCLONE_CONFIG_SFTPTEST_USER="+testUser,
"RCLONE_CONFIG_SFTPTEST_PASS="+obscure.MustObscure(testPass),
)
out, err := cmd.CombinedOutput()
if len(out) != 0 {
t.Logf("\n----------\n%s----------\n", string(out))
}
assert.NoError(t, err, "Running sftp integration tests")
}

View File

@@ -1,11 +0,0 @@
// Build for sftp for unsupported platforms to stop go complaining
// about "no buildable Go source files "
// +build plan9
package sftp
import "github.com/spf13/cobra"
// Command definition is nil to show not implemented
var Command *cobra.Command = nil

View File

@@ -3,7 +3,6 @@
package webdav package webdav
import ( import (
"context"
"net/http" "net/http"
"os" "os"
@@ -16,6 +15,7 @@ import (
"github.com/ncw/rclone/vfs" "github.com/ncw/rclone/vfs"
"github.com/ncw/rclone/vfs/vfsflags" "github.com/ncw/rclone/vfs/vfsflags"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"golang.org/x/net/context" // switch to "context" when we stop supporting go1.8
"golang.org/x/net/webdav" "golang.org/x/net/webdav"
) )
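The import swap above is safe because the context package moved into the standard library in Go 1.7, and since Go 1.9 golang.org/x/net/context merely forwards to it, so the two packages are interchangeable; the x/net path is kept here only while go1.8 is still supported. A minimal illustration:

```
package main

import (
	"fmt"

	"golang.org/x/net/context" // forwards to the standard "context" on modern Go
)

func main() {
	// The same context.Background() the standard library returns.
	ctx := context.Background()
	fmt.Println(ctx.Err() == nil) // prints true: background ctx is never done
}
```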

View File

@@ -20,7 +20,8 @@ import (
) )
const ( const (
testBindAddress = "localhost:0" testBindAddress = "localhost:51778"
testURL = "http://" + testBindAddress + "/"
) )
// check interfaces // check interfaces
@@ -69,7 +70,7 @@ func TestWebDav(t *testing.T) {
cmd := exec.Command("go", args...) cmd := exec.Command("go", args...)
cmd.Env = append(os.Environ(), cmd.Env = append(os.Environ(),
"RCLONE_CONFIG_WEBDAVTEST_TYPE=webdav", "RCLONE_CONFIG_WEBDAVTEST_TYPE=webdav",
"RCLONE_CONFIG_WEBDAVTEST_URL="+w.Server.URL(), "RCLONE_CONFIG_WEBDAVTEST_URL="+testURL,
"RCLONE_CONFIG_WEBDAVTEST_VENDOR=other", "RCLONE_CONFIG_WEBDAVTEST_VENDOR=other",
) )
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()

View File

@@ -20,7 +20,7 @@ Few cloud storage services provides different storage classes on objects,
for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive, for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive,
Google Cloud Storage, Regional Storage, Nearline, Coldline etc. Google Cloud Storage, Regional Storage, Nearline, Coldline etc.
Note that, certain tier changes make objects not available to access immediately. Note that, certain tier chages make objects not available to access immediately.
For example tiering to archive in azure blob storage makes objects in frozen state, For example tiering to archive in azure blob storage makes objects in frozen state,
user can restore by setting tier to Hot/Cool, similarly S3 to Glacier makes object user can restore by setting tier to Hot/Cool, similarly S3 to Glacier makes object
inaccessible.true inaccessible.true

View File

@@ -61,16 +61,15 @@ Features
* [Sync](/commands/rclone_sync/) (one way) mode to make a directory identical * [Sync](/commands/rclone_sync/) (one way) mode to make a directory identical
* [Check](/commands/rclone_check/) mode to check for file hash equality * [Check](/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, eg two different cloud accounts * Can sync to and from network, eg two different cloud accounts
* [Encryption](/crypt/) backend * ([Encryption](/crypt/)) backend
* [Cache](/cache/) backend * ([Cache](/cache/)) backend
* [Union](/union/) backend * ([Union](/union/)) backend
* Optional FUSE mount ([rclone mount](/commands/rclone_mount/)) * Optional FUSE mount ([rclone mount](/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](/commands/rclone_serve/) local or remote files over [HTTP](/commands/rclone_serve_http/)/[WebDav](/commands/rclone_serve_webdav/)/[FTP](/commands/rclone_serve_ftp/)/[SFTP](/commands/rclone_serve_sftp/)/[dlna](/commands/rclone_serve_dlna/)
Links Links
* <i class="fa fa-home"></i> [Home page](https://rclone.org/) * <i class="fa fa-home"></i> [Home page](https://rclone.org/)
* <i class="fa fa-github"></i> [GitHub project page for source and bug tracker](https://github.com/ncw/rclone) * <i class="fa fa-github"></i> [GitHub project page for source and bug tracker](https://github.com/ncw/rclone)
* <i class="fa fa-comments"></i> [Rclone Forum](https://forum.rclone.org) * <i class="fa fa-comments"></i> [Rclone Forum](https://forum.rclone.org)
* <i class="fa fa-google-plus"></i> <a href="https://google.com/+RcloneOrg" rel="publisher">Google+ page</a>
* <i class="fa fa-cloud-download"></i>[Downloads](/downloads/) * <i class="fa fa-cloud-download"></i>[Downloads](/downloads/)

View File

@@ -15,7 +15,7 @@ eg `remote:directory/subdirectory` or `/directory/subdirectory`.
During the initial setup with `rclone config` you will specify the target During the initial setup with `rclone config` you will specify the target
remote. The target remote can either be a local path or another remote. remote. The target remote can either be a local path or another remote.
Subfolders can be used in target remote. Assume a alias remote named `backup` Subfolders can be used in target remote. Asume a alias remote named `backup`
with the target `mydrive:private/backup`. Invoking `rclone mkdir backup:desktop` with the target `mydrive:private/backup`. Invoking `rclone mkdir backup:desktop`
is exactly the same as invoking `rclone mkdir mydrive:private/backup/desktop`. is exactly the same as invoking `rclone mkdir mydrive:private/backup/desktop`.
@@ -41,7 +41,7 @@ n/s/q> n
name> remote name> remote
Type of storage to configure. Type of storage to configure.
Choose a number from below, or type in your own value Choose a number from below, or type in your own value
1 / Alias for an existing remote 1 / Alias for a existing remote
\ "alias" \ "alias"
2 / Amazon Drive 2 / Amazon Drive
\ "amazon cloud drive" \ "amazon cloud drive"
@@ -131,7 +131,7 @@ Copy another local directory to the alias directory called source
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/alias/alias.go then run make backenddocs --> <!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/alias/alias.go then run make backenddocs -->
### Standard Options ### Standard Options
Here are the standard options specific to alias (Alias for an existing remote). Here are the standard options specific to alias (Alias for a existing remote).
#### --alias-remote #### --alias-remote

View File

@@ -248,13 +248,3 @@ Contributors
* Dan Walters <dan@walters.io> * Dan Walters <dan@walters.io>
* Danil Semelenov <sgtpep@users.noreply.github.com> * Danil Semelenov <sgtpep@users.noreply.github.com>
* xopez <28950736+xopez@users.noreply.github.com> * xopez <28950736+xopez@users.noreply.github.com>
* Ben Boeckel <mathstuf@gmail.com>
* Manu <manu@snapdragon.cc>
* Kyle E. Mitchell <kyle@kemitchell.com>
* Gary Kim <gary@garykim.dev>
* Jon <jonathn@github.com>
* Jeff Quinn <jeffrey.quinn@bluevoyant.com>
* Peter Berbec <peter@berbec.com>
* didil <1284255+didil@users.noreply.github.com>
* id01 <gaviniboom@gmail.com>
* Robert Marko <robimarko@gmail.com>

View File

@@ -270,7 +270,7 @@ start and finish the upload) and another 2 requests for each chunk:
#### Versions #### #### Versions ####
Versions can be viewed with the `--b2-versions` flag. When it is set Versions can be viewd with the `--b2-versions` flag. When it is set
rclone will show and act on older versions of files. For example rclone will show and act on older versions of files. For example
Listing without `--b2-versions` Listing without `--b2-versions`
@@ -409,18 +409,5 @@ Disable checksums for large (> upload cutoff) files
- Type: bool - Type: bool
- Default: false - Default: false
#### --b2-download-url
Custom endpoint for downloads.
This is usually set to a Cloudflare CDN URL as Backblaze offers
free egress for data downloaded through the Cloudflare network.
Leave blank if you want to use the endpoint provided by Backblaze.
- Config: download_url
- Env Var: RCLONE_B2_DOWNLOAD_URL
- Type: string
- Default: ""
<!--- autogenerated options stop --> <!--- autogenerated options stop -->

View File

@@ -262,7 +262,7 @@ There is an issue with wrapping the remotes in this order:
During testing, I experienced a lot of bans with the remotes in this order. During testing, I experienced a lot of bans with the remotes in this order.
I suspect it might be related to how crypt opens files on the cloud provider I suspect it might be related to how crypt opens files on the cloud provider
which makes it think we're downloading the full file instead of small chunks. which makes it think we're downloading the full file instead of small chunks.
Organizing the remotes in this order yields better results: Organizing the remotes in this order yelds better results:
<span style="color:green">**cloud remote** -> **cache** -> **crypt**</span> <span style="color:green">**cloud remote** -> **cache** -> **crypt**</span>
#### absolute remote paths #### #### absolute remote paths ####

View File

@@ -1,97 +1,11 @@
--- ---
title: "Documentation" title: "Documentation"
description: "Rclone Changelog" description: "Rclone Changelog"
date: "2019-04-13" date: "2019-02-09"
--- ---
# Changelog # Changelog
## v1.47.0 - 2019-04-13
* New backends
* Backend for Koofr cloud storage service. (jaKa)
* New Features
* Resume downloads if the reader fails in copy (Nick Craig-Wood)
* this means rclone will restart transfers if the source has an error
* this is most useful for downloads or cloud to cloud copies
* Use `--fast-list` for listing operations where it won't use more memory (Nick Craig-Wood)
* this should speed up the following operations on remotes which support `ListR`
* `dedupe`, `serve restic`, `lsf`, `ls`, `lsl`, `lsjson`, `lsd`, `md5sum`, `sha1sum`, `hashsum`, `size`, `delete`, `cat`, `settier`
* use `--disable ListR` to get old behaviour if required
* Make `--files-from` traverse the destination unless `--no-traverse` is set (Nick Craig-Wood)
* this fixes `--files-from` with Google drive and excessive API use in general.
* Make server side copy account bytes and obey `--max-transfer` (Nick Craig-Wood)
* Add `--create-empty-src-dirs` flag and default to not creating empty dirs (ishuah)
* Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood)
* Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood)
* build:
* Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood)
* Update to use go1.12.x for the build (Nick Craig-Wood)
* serve dlna: Add connection manager service description to improve compatibility (Dan Walters)
* lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood)
* lsjson: Added `--files-only` and `--dirs-only` flags (calistri)
* rc: Implement operations/publiclink the equivalent of `rclone link` (Nick Craig-Wood)
* Bug Fixes
* accounting: Fix total ETA when `--stats-unit bits` is in effect (Nick Craig-Wood)
* Bash TAB completion
* Use private custom func to fix clash between rclone and kubectl (Nick Craig-Wood)
* Fix for remotes with underscores in their names (Six)
* Fix completion of remotes (Florian Gamböck)
* Fix autocompletion of remote paths with spaces (Danil Semelenov)
* serve dlna: Fix root XML service descriptor (Dan Walters)
* ncdu: Fix display corruption with Chinese characters (Nick Craig-Wood)
* Add SIGTERM to signals which run the exit handlers on unix (Nick Craig-Wood)
* rc: Reload filter when the options are set via the rc (Nick Craig-Wood)
* VFS / Mount
* Fix FreeBSD: Ignore Truncate if called with no readers and already the correct size (Nick Craig-Wood)
* Read directory and check for a file before mkdir (Nick Craig-Wood)
* Shorten the locking window for vfs/refresh (Nick Craig-Wood)
* Azure Blob
* Enable MD5 checksums when uploading files bigger than the "Cutoff" (Dr.Rx)
* Fix SAS URL support (Nick Craig-Wood)
* B2
* Allow manual configuration of backblaze downloadUrl (Vince)
* Ignore already_hidden error on remove (Nick Craig-Wood)
* Ignore malformed `src_last_modified_millis` (Nick Craig-Wood)
* Drive
* Add `--skip-checksum-gphotos` to ignore incorrect checksums on Google Photos (Nick Craig-Wood)
* Allow server side move/copy between different remotes. (Fionera)
* Add docs on team drives and `--fast-list` eventual consistency (Nestar47)
* Fix imports of text files (Nick Craig-Wood)
* Fix range requests on 0 length files (Nick Craig-Wood)
* Fix creation of duplicates with server side copy (Nick Craig-Wood)
* Dropbox
* Retry blank errors to fix long listings (Nick Craig-Wood)
* FTP
* Add `--ftp-concurrency` to limit maximum number of connections (Nick Craig-Wood)
* Google Cloud Storage
* Fall back to default application credentials (marcintustin)
* Allow bucket policy only buckets (Nick Craig-Wood)
* HTTP
* Add `--http-no-slash` for websites with directories with no slashes (Nick Craig-Wood)
* Remove duplicates from listings (Nick Craig-Wood)
* Fix socket leak on 404 errors (Nick Craig-Wood)
* Jottacloud
* Fix token refresh (Sebastian Bünger)
* Add device registration (Oliver Heyme)
* Onedrive
* Implement graceful cancel of multipart uploads if rclone is interrupted (Cnly)
* Always add trailing colon to path when addressing items (Cnly)
* Return errors instead of panic for invalid uploads (Fabian Möller)
* S3
* Add support for "Glacier Deep Archive" storage class (Manu)
* Update Dreamhost endpoint (Nick Craig-Wood)
* Note incompatibility with CEPH Jewel (Nick Craig-Wood)
* SFTP
* Allow custom ssh client config (Alexandru Bumbacea)
* Swift
* Obey Retry-After to enable OVH restore from cold storage (Nick Craig-Wood)
* Work around token expiry on CEPH (Nick Craig-Wood)
* WebDAV
* Allow IsCollection property to be integer or boolean (Nick Craig-Wood)
* Fix race when creating directories (Nick Craig-Wood)
* Fix About/df when reading the available/total returns 0 (Nick Craig-Wood)
## v1.46 - 2019-02-09 ## v1.46 - 2019-02-09
* New backends * New backends

View File

@@ -1,5 +1,5 @@
--- ---
date: 2019-05-10T23:12:21+01:00 date: 2019-02-09T10:42:18Z
title: "rclone" title: "rclone"
slug: rclone slug: rclone
url: /commands/rclone/ url: /commands/rclone/
@@ -26,7 +26,301 @@ rclone [flags]
### Options ### Options
``` ```
-h, --help help for rclone --acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.,
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.,
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
-h, --help help for rclone
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
-V, --version Print the version number
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
``` ```
### SEE ALSO ### SEE ALSO
@@ -79,4 +373,4 @@ rclone [flags]
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion. * [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
* [rclone version](/commands/rclone_version/) - Show the version number. * [rclone version](/commands/rclone_version/) - Show the version number.
###### Auto generated by spf13/cobra on 10-May-2019 ###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
--- ---
date: 2019-05-10T23:12:21+01:00 date: 2019-02-09T10:42:18Z
title: "rclone about" title: "rclone about"
slug: rclone_about slug: rclone_about
url: /commands/rclone_about/ url: /commands/rclone_about/
@@ -66,8 +66,306 @@ rclone about remote: [flags]
--json Format output as JSON --json Format output as JSON
``` ```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.,
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.,
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
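As a general note on the inherited flags above: each of them can normally also be supplied through an environment variable, by upper-casing the flag name, replacing dashes with underscores, and adding an `RCLONE_` prefix. A minimal sketch (the paths and `remote:` name are placeholders, not from this diff):

```
# These two invocations should be equivalent:
rclone copy --transfers 8 /local/dir remote:dir
RCLONE_TRANSFERS=8 rclone copy /local/dir remote:dir
```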
### SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
 ---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
 title: "rclone authorize"
 slug: rclone_authorize
 url: /commands/rclone_authorize/
@@ -25,8 +25,306 @@ rclone authorize [flags]
-h, --help help for authorize
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for Google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
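Since `--bwlimit` in the list above accepts either a single rate or a full timetable, here is a brief sketch of the timetable form (the times, rates, and `src:`/`dst:` names are invented for illustration):

```
# Limit to 512 kByte/s during the working day, 10 MByte/s over lunch,
# and remove the limit overnight:
rclone copy --bwlimit "08:00,512 12:00,10M 13:00,512 23:00,off" src:path dst:path
```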
### SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019
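For orientation, `rclone authorize` is the command used during remote setup on headless machines: run it on a machine that has a web browser, complete the login there, and paste the token it prints into the matching prompt of `rclone config` on the headless box. A typical session might look like this ("drive" is just an example backend):

```
# On the machine with a browser:
rclone authorize "drive"
# ...log in via the browser, then copy the token this prints
# into `rclone config` on the headless machine.
```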

View File

@@ -1,5 +1,5 @@
 ---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
 title: "rclone cachestats"
 slug: rclone_cachestats
 url: /commands/rclone_cachestats/
@@ -24,8 +24,306 @@ rclone cachestats source: [flags]
-h, --help help for cachestats
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for Google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
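The include/exclude flags listed above follow rclone's filter syntax. A small sketch under invented names (the paths and `remote:` are placeholders):

```
# Copy only JPEG files:
rclone copy --include "*.jpg" /local/pics remote:pics
# Or copy everything except backup files:
rclone copy --exclude "*.bak" /local/pics remote:pics
```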
### SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019
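As context for the diff above, `rclone cachestats` simply prints statistics for a cache remote. Assuming a remote named "mycache" of type cache has already been configured (the name is hypothetical):

```
# "mycache" is a hypothetical remote backed by the cache backend:
rclone cachestats mycache:
```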

View File

@@ -1,5 +1,5 @@
 ---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
 title: "rclone cat"
 slug: rclone_cat
 url: /commands/rclone_cat/
@@ -46,8 +46,306 @@ rclone cat remote:path [flags]
--tail int Only print the last N characters.
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for Google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be a multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access, if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of the key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ followed by the version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
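All of these global flags stack on a single command line. As a minimal sketch (the remote and path names here are placeholders, not taken from this page), a verbose dry-run copy limited to recent JPEGs could look like:

```
# dry run: show what would be copied, change nothing
rclone copy source:media dest:backup \
    --include "*.jpg" --max-age 7d \
    --transfers 8 --dry-run -v
```

Any of the flags above can also be set through the environment by upper-casing the flag, swapping `-` for `_`, and prefixing `RCLONE_` (so `--transfers 8` becomes `RCLONE_TRANSFERS=8`).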
### SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
title: "rclone check"
slug: rclone_check
url: /commands/rclone_check/
@@ -40,8 +40,306 @@ rclone check source:path dest:path [flags]
--one-way Check one way only, source files must exist on remote
```
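As a usage sketch for the flag above (the paths and remote name are placeholders): with --one-way the check still passes when the destination carries extra files missing from the source.

```
# only verify that every source file exists and matches on the remote
rclone check /home/user/docs remote:docs --one-way
```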
### SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
title: "rclone cleanup"
slug: rclone_cleanup
url: /commands/rclone_cleanup/
@@ -25,8 +25,306 @@ rclone cleanup remote:path [flags]
-h, --help help for cleanup
```
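A usage sketch (the remote name is a placeholder); exactly what gets cleaned is backend-dependent, typically the trash or unfinished large-file uploads, and not every remote supports it:

```
# ask the backend to tidy up, e.g. empty its trash
rclone cleanup remote:
```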
### SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
title: "rclone config"
slug: rclone_config
url: /commands/rclone_config/
@@ -25,6 +25,304 @@ rclone config [flags]
-h, --help help for config
```
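rclone config is interactive, but a remote can also be declared without touching the config file, using rclone's RCLONE_CONFIG_<NAME>_<OPTION> environment convention; a hedged sketch (the remote name, host and user below are invented):

```
# define an on-the-fly sftp remote called "mysftp"
export RCLONE_CONFIG_MYSFTP_TYPE=sftp
export RCLONE_CONFIG_MYSFTP_HOST=example.com
export RCLONE_CONFIG_MYSFTP_USER=demo
rclone lsd mysftp:
```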
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If objects are greater, use the drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
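Each of these flags can likewise be set through an environment variable: the name is `RCLONE_` followed by the flag name in upper case with dashes replaced by underscores, so `--transfers` becomes `RCLONE_TRANSFERS`. A minimal sketch with placeholder values (the remote name `myswift` is again hypothetical):

```
# Equivalent to passing --transfers 8 and --swift-chunk-size 1G
# on every subsequent rclone invocation in this shell.
export RCLONE_TRANSFERS=8
export RCLONE_SWIFT_CHUNK_SIZE=1G
rclone sync /local/media myswift:backups
```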
### SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -38,4 +336,4 @@ rclone config [flags]
* [rclone config show](/commands/rclone_config_show/) - Print (decrypted) config file, or the config for a single remote.
* [rclone config update](/commands/rclone_config_update/) - Update options in an existing remote.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019

docs/content/commands/rclone_config_create.md

@@ -1,5 +1,5 @@
---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
title: "rclone config create"
slug: rclone_config_create
url: /commands/rclone_config_create/
@@ -39,8 +39,306 @@ rclone config create <name> <type> [<key> <value>]* [flags]
-h, --help help for create
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If objects are greater, use the drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
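As the usage line above shows, `rclone config create` takes the new remote's name, its type, and then `<key> <value>` pairs for any matching backend options from this list. A minimal non-interactive sketch (remote name, host and user are placeholders):

```
# Create an SFTP remote called "mysftp" without entering the
# interactive wizard, setting two backend options in the same call.
rclone config create mysftp sftp host example.com user demo
```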
### SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019

docs/content/commands/rclone_config_delete.md

@@ -1,5 +1,5 @@
---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
title: "rclone config delete"
slug: rclone_config_delete
url: /commands/rclone_config_delete/
@@ -22,8 +22,306 @@ rclone config delete <name> [flags]
-h, --help help for delete
```
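Per the usage line above, deletion is non-interactive and needs only the remote's name. A minimal sketch with a hypothetical remote:

```
# Remove the remote called "mysftp" from the config file.
# Only the config entry is removed; the data it pointed at is untouched.
rclone config delete mysftp
```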
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If objects are greater, use the drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IP address:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is "rclone/" followed by the version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
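
All of these inherited flags can be combined with any rclone subcommand. A minimal sketch, assuming a configured remote named `remote:` (a placeholder, not part of the generated docs):

```
# List directories on a remote while dumping HTTP headers and
# limiting the client to 10 transactions per second.
# "remote:" is an illustrative placeholder.
rclone lsd remote: --dump headers --tpslimit 10
```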
### SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
---
date: 2019-02-09T10:42:18Z
title: "rclone config dump"
slug: rclone_config_dump
url: /commands/rclone_config_dump/
@@ -22,8 +22,306 @@ rclone config dump [flags]
-h, --help help for dump
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In-memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IP address:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is "rclone/" followed by the version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
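
For example, a minimal sketch assuming a config file at a non-default path: `config dump` writes the decoded configuration to standard output as JSON, and the inherited `--config` flag selects which file is read.

```
# Dump the configuration as JSON from a non-default config file.
# The path shown is illustrative only.
rclone config dump --config /path/to/rclone.conf
```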
### SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
---
date: 2019-02-09T10:42:18Z
title: "rclone config edit"
slug: rclone_config_edit
url: /commands/rclone_config_edit/
@@ -25,8 +25,306 @@ rclone config edit [flags]
-h, --help help for edit
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In-memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IP address:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access; if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is "rclone/" followed by the version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
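
A minimal sketch, assuming an alternative config file you want to edit: since `config edit` starts the same interactive session as `rclone config`, the inherited `--config` flag is the usual way to choose which file is edited.

```
# Interactively edit an alternative configuration file.
# The path shown is an assumption for illustration.
rclone config edit --config ~/rclone-test.conf
```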
### SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
---
date: 2019-02-09T10:42:18Z
title: "rclone config file"
slug: rclone_config_file
url: /commands/rclone_config_file/
@@ -22,8 +22,306 @@ rclone config file [flags]
-h, --help help for file
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
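
As a usage sketch for this command, the invocation below prints the location of the configuration file; the path shown is illustrative and can be overridden with the `--config` flag listed above:

```
$ rclone config file
Configuration file is stored at:
/home/user/.rclone.conf
```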
### SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
title: "rclone config password"
slug: rclone_config_password
url: /commands/rclone_config_password/
@@ -29,8 +29,306 @@ rclone config password <name> [<key> <value>]+ [flags]
  -h, --help   help for password
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IPaddress:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
--s3-force-path-style If true use path style access if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
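
Following the `rclone config password <name> [<key> <value>]+` synopsis above, a hypothetical non-interactive invocation (the remote name and secret are made up) would be:

```
# Update the "pass" key of the hypothetical remote "mysftp" in one step,
# avoiding the interactive prompt.
rclone config password mysftp pass mySecretPassword
```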
### SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
-###### Auto generated by spf13/cobra on 10-May-2019
+###### Auto generated by spf13/cobra on 9-Feb-2019

View File

@@ -1,5 +1,5 @@
---
-date: 2019-05-10T23:12:21+01:00
+date: 2019-02-09T10:42:18Z
title: "rclone config providers"
slug: rclone_config_providers
url: /commands/rclone_config_providers/
@@ -22,8 +22,306 @@ rclone config providers [flags]
  -h, --help   help for providers
```
### Options inherited from parent commands
```
--acd-auth-url string Auth server URL.
--acd-client-id string Amazon Application Client ID.
--acd-client-secret string Amazon Application Client Secret.
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GB to wait after a failed complete upload to see if it appears. (default 3m0s)
--alias-remote string Remote or path to alias.
--ask-password Allow prompt for password for encrypted configuration. (default true)
--auto-confirm If enabled, do not request console confirmation.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use connection string or SAS URL)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100MB). (default 4M)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key (leave blank to use connection string or SAS URL)
--azureblob-list-chunk int Size of blob list. (default 5000)
--azureblob-sas-url string SAS URL for container level access only
--azureblob-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (<= 256MB). (default 256M)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-endpoint string Endpoint for the service.
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files.
--b2-key string Application Key
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-versions Include old versions in directory listings.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--box-client-id string Box App Client Id.
--box-client-secret string Box App Client Secret
--box-commit-retries int Max number of times to try committing a multipart file. (default 100)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50MB). (default 50M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--bwlimit BwTimetable Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
--cache-dir string Directory rclone will use for caching. (default "$HOME/.cache/rclone")
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times etc). (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verifications when connecting to the Plex server
--cache-plex-password string The password of the Plex user
--cache-plex-url string The URL of the Plex server
--cache-plex-username string The username of the Plex user
--cache-read-retries int How many times to retry a read from a cache storage. (default 10)
--cache-remote string Remote to cache.
--cache-rps int Limits the number of requests per second to the source FS (-1 to disable) (default -1)
--cache-tmp-upload-path string Directory to keep temporary files until they are uploaded.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--checkers int Number of checkers to run in parallel. (default 8)
-c, --checksum Skip based on checksum (if available) & size, not mod-time & size
--config string Config file. (default "/home/ncw/.rclone.conf")
--contimeout duration Connect timeout (default 1m0s)
-L, --copy-links Follow symlinks and copy the pointed to item.
--cpuprofile string Write cpu profile to file
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
--crypt-filename-encryption string How to encrypt the filenames. (default "standard")
--crypt-password string Password or pass phrase for encryption.
--crypt-password2 string Password or pass phrase for salt. Optional but recommended.
--crypt-remote string Remote to encrypt/decrypt.
--crypt-show-mapping For all files listed show how the names encrypt.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-alternate-export Use alternate export URLs for google documents export.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
--drive-client-id string Google Application Client Id
--drive-client-secret string Google Application Client Secret
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs. (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: see export_formats
--drive-impersonate string Impersonate this user when using a service account.
--drive-import-formats string Comma separated list of preferred formats for uploading Google docs.
--drive-keep-revision-forever Keep new head revision of each file forever.
--drive-list-chunk int Size of listing chunk 100-1000. 0 to disable. (default 1000)
--drive-pacer-burst int Number of API calls to allow without sleeping. (default 100)
--drive-pacer-min-sleep Duration Minimum time to sleep between API calls. (default 100ms)
--drive-root-folder-id string ID of the root folder
--drive-scope string Scope that rclone should use when requesting access from drive.
--drive-service-account-credentials string Service Account Credentials JSON blob
--drive-service-account-file string Service Account Credentials JSON file path
--drive-shared-with-me Only show files that are shared with me.
--drive-skip-gdocs Skip google documents in all listings.
--drive-team-drive string ID of the Team Drive
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-use-created-date Use file created date instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
--drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-client-id string Dropbox App Client Id
--dropbox-client-secret string Dropbox App Client Secret
--dropbox-impersonate string Impersonate this user when using a business account.
-n, --dry-run Do a trial run with no permanent changes
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
--dump-bodies Dump HTTP headers and bodies - may contain sensitive info
--dump-headers Dump HTTP headers - may contain sensitive info
--exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read exclude patterns from file
--exclude-if-present string Exclude directories if filename is present
--fast-list Use recursive list if available. Uses more memory but fewer transactions.
--files-from stringArray Read list of source-file names from file
-f, --filter stringArray Add a file-filtering rule
--filter-from stringArray Read filtering patterns from a file
--ftp-host string FTP host to connect to
--ftp-pass string FTP password
--ftp-port string FTP port, leave blank to use default (21)
--ftp-user string FTP username, leave blank for current username, $USER
--gcs-bucket-acl string Access Control List for new buckets.
--gcs-client-id string Google Application Client Id
--gcs-client-secret string Google Application Client Secret
--gcs-location string Location for the newly created buckets.
--gcs-object-acl string Access Control List for new objects.
--gcs-project-number string Project number.
--gcs-service-account-file string Service Account Credentials JSON file path
--gcs-storage-class string The storage class to use when storing objects in Google Cloud Storage.
--http-url string URL of http host to connect to
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-client-id string Hubic Client Id
--hubic-client-secret string Hubic Client Secret
--hubic-no-chunk Don't chunk files during streaming upload.
--ignore-case Ignore case in filters (case insensitive)
--ignore-checksum Skip post copy check of checksums.
--ignore-errors Delete even if there are I/O errors
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping; use mod-time or checksum.
-I, --ignore-times Don't skip files that match size and time - transfer all files
--immutable Do not modify files. Fail if existing files have been modified.
--include stringArray Include files matching pattern
--include-from stringArray Read include patterns from file
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-mountpoint string The mountpoint to use.
--jottacloud-unlink Remove existing public link to file/folder with link command rather than creating.
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
--jottacloud-user string User Name:
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-no-check-updated Don't check to see if the files change during upload
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--log-file string Log everything to this file
--log-format string Comma separated list of log format options (default "date,time")
--log-level string Log level DEBUG|INFO|NOTICE|ERROR (default "NOTICE")
--low-level-retries int Number of low level retries to do. (default 10)
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--max-backlog int Maximum number of objects in sync or check backlog. (default 10000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-depth int If set limits the recursion depth to this. (default -1)
--max-size SizeSuffix Only transfer files smaller than this in k or suffix b|k|M|G (default off)
--max-transfer SizeSuffix Maximum size of data to transfer. (default off)
--mega-debug Output more debug from Mega.
--mega-hard-delete Delete files permanently rather than putting them into the trash.
--mega-pass string Password.
--mega-user string User name
--memprofile string Write memory profile to file
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in k or suffix b|k|M|G (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-gzip-encoding Don't set Accept-Encoding: gzip.
--no-traverse Don't traverse destination file system on copy.
--no-update-modtime Don't update destination mod-time if files identical.
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k. (default 10M)
--onedrive-client-id string Microsoft App Client Id
--onedrive-client-secret string Microsoft App Client Secret
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive ( personal | business | documentLibrary )
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings.
--opendrive-password string Password.
--opendrive-username string Username
--pcloud-client-id string Pcloud App Client Id
--pcloud-client-secret string Pcloud App Client Secret
-P, --progress Show progress during transfer.
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
--qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-zone string Zone to connect to.
-q, --quiet Print as little stuff as possible
--rc Enable the remote control server.
--rc-addr string IP address:Port or :Port to bind server to. (default "localhost:5572")
--rc-cert string SSL PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
--rc-files string Path to local files to serve on the HTTP server.
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods.
--rc-pass string Password for authentication.
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects.
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--rc-user string User name for authentication.
--retries int Retry operations this many times if they fail (default 3)
--retries-sleep duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m. (0 to disable)
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-endpoint string Endpoint for S3 API.
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS metadata if no env vars).
--s3-force-path-style If true use path style access, if false use virtual hosted style. (default true)
--s3-location-constraint string Location constraint - must be set to match the Region.
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
--s3-secret-access-key string AWS Secret Access Key (password)
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3.
--s3-session-token string An AWS session token
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of the Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-v2-auth If true use v2 authentication.
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-key-file string Path to PEM-encoded private key file, leave blank or set key-use-agent to use ssh-agent.
--sftp-key-file-pass string The passphrase to decrypt the PEM-encoded private key file.
--sftp-key-use-agent When set forces the usage of the ssh-agent.
--sftp-pass string SSH password, leave blank to use ssh-agent.
--sftp-path-override string Override path used by SSH connection.
--sftp-port string SSH port, leave blank to use default (22)
--sftp-set-modtime Set the modified time on the remote if set. (default true)
--sftp-use-insecure-cipher Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.
--sftp-user string SSH username, leave blank for current username, ncw
--size-only Skip based on size only, not mod-time or checksum
--skip-links Don't warn about skipped symlinks.
--stats duration Interval between printing stats, e.g. 500ms, 60s, 5m. (0 to disable) (default 1m0s)
--stats-file-name-length int Max file name length in stats. 0 for no limit (default 45)
--stats-log-level string Log level to show --stats output DEBUG|INFO|NOTICE|ERROR (default "INFO")
--stats-one-line Make the stats fit on one line.
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--suffix string Suffix for use with --backup-dir.
--swift-application-credential-id string Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)
--swift-application-credential-name string Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)
--swift-application-credential-secret string Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form.
--swift-key string API key or password (OS_PASSWORD).
--swift-no-chunk Don't chunk files during streaming upload.
--swift-region string Region name - optional (OS_REGION_NAME)
--swift-storage-policy string The storage policy to use when creating a new container
--swift-storage-url string Storage URL - optional (OS_STORAGE_URL)
--swift-tenant string Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
--swift-tenant-domain string Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME).
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
--syslog Use Syslog for logging
--syslog-facility string Facility for syslog, eg KERN,USER,... (default "DAEMON")
--timeout duration IO idle timeout (default 5m0s)
--tpslimit float Limit HTTP transactions per second to this.
--tpslimit-burst int Max burst of transactions for --tpslimit. (default 1)
--track-renames When synchronizing, track file renames and do a server side move if possible
--transfers int Number of file transfers to run in parallel. (default 4)
--union-remotes string List of space separated remotes.
-u, --update Skip files that are newer on the destination.
--use-cookies Enable session cookiejar.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ followed by the version number. (default "rclone/v1.46")
-v, --verbose count Print lots more stuff (repeat for more)
--webdav-bearer-token string Bearer token instead of user/pass (eg a Macaroon)
--webdav-pass string Password.
--webdav-url string URL of http host to connect to
--webdav-user string User name
--webdav-vendor string Name of the Webdav site/service/software you are using
--yandex-client-id string Yandex Client Id
--yandex-client-secret string Yandex Client Secret
--yandex-unlink Remove existing public link to file/folder with link command rather than creating.
```
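
The list above is dense, so a short illustration may help: the sketch below combines a handful of the general flags listed here into a single sync invocation. The remote name `s3:` and both paths are hypothetical placeholders, and the values are examples rather than recommendations.

```
# Sync local media to a hypothetical S3 remote: rename-aware, limited to
# files between 1M and 1G changed in the last 30 days, with eight
# parallel transfers and compact live progress output.
rclone sync /local/media s3:my-bucket/media \
    --track-renames \
    --min-size 1M --max-size 1G \
    --max-age 30d \
    --transfers 8 \
    --stats 10s --stats-one-line \
    --progress
```

The backend-prefixed flags (for example the `--s3-*` or `--sftp-*` groups above) apply to their backend only and can be added to a command line in exactly the same way.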
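
Similarly, the `--rc-*` group configures the remote control server that any command can expose with `--rc`. A minimal sketch, assuming the credentials are placeholders you would choose yourself:

```
# Run a long copy while exposing the remote control API, protected by
# basic auth, on the default localhost:5572.
rclone copy /src remote:dst \
    --rc \
    --rc-addr localhost:5572 \
    --rc-user alice --rc-pass secret
```
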
### SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
###### Auto generated by spf13/cobra on 9-Feb-2019
