Mirror of https://github.com/rclone/rclone.git (synced 2026-01-07 19:13:19 +00:00)

Compare commits: fix-6032-y...fix-azureb (72 commits)
| SHA1 |
|---|
| c726ae3afb |
| e34c543660 |
| 598364ad0f |
| 211dbe9aee |
| 4829527dac |
| cc8dde402f |
| 2b67ad17aa |
| 6da3522499 |
| 97606bbdef |
| a15885dd74 |
| 87c201c92a |
| d77736c21a |
| 86bd5f6922 |
| fe271a4e35 |
| 75455d4000 |
| 82e24f521f |
| 5605e34f7b |
| 06598531e0 |
| b1d43f8d41 |
| b53c38c9fd |
| 03715f6c6b |
| 07481396e0 |
| bab91e4402 |
| fde40319ef |
| 94e330d4fa |
| 087543d723 |
| 6a759d936a |
| 7c31240bb8 |
| 25146b4306 |
| 240561850b |
| 39a1e37441 |
| 4c02f50ef5 |
| f583b86334 |
| 118e8e1470 |
| afcea9c72b |
| 27176cc6bb |
| f1e4b7da7b |
| f065a267f6 |
| 17f8014909 |
| 8ba04562c3 |
| 285747b1d1 |
| 7bb8b8f4ba |
| 59c242bbf6 |
| a2bacd7d3f |
| 9babcc4811 |
| a0f665ec3c |
| ecdf42c17f |
| be9ee1d138 |
| 9e9ead2ac4 |
| 4f78226f8b |
| 54c9c3156c |
| 6ecbbf796e |
| 603e51c43f |
| ca4671126e |
| 6ea26b508a |
| 887cccb2c1 |
| d975196cfa |
| 1f39b28f49 |
| 2738db22fb |
| 1978ddde73 |
| c2bfda22ab |
| d4da9b98d6 |
| e4f5912294 |
| 750fffdf71 |
| 388e74af52 |
| f9354fff2f |
| ff1f173fc2 |
| f8073a7b63 |
| 807f1cedaa |
| bf9c68c88a |
| 189cba0fbe |
| 69f726f16c |
45 .github/workflows/build.yml (vendored)
```diff
@@ -25,12 +25,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.15', 'go1.16']
+        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.16', 'go1.17']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -41,7 +41,7 @@ jobs:
 
           - job_name: mac_amd64
             os: macos-11
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -50,14 +50,14 @@ jobs:
 
           - job_name: mac_arm64
             os: macos-11
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows_amd64
             os: windows-latest
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: cmount
             build_flags: '-include "^windows/amd64" -cgo'
             build_args: '-buildmode exe'
@@ -67,7 +67,7 @@ jobs:
 
           - job_name: windows_386
             os: windows-latest
-            go: '1.17.x'
+            go: '1.18.x'
             gotags: cmount
             goarch: '386'
             cgo: '1'
@@ -78,23 +78,23 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.18.x'
             build_flags: '-exclude "^(windows/(386|amd64)|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.15
+          - job_name: go1.16
             os: ubuntu-latest
-            go: '1.15.x'
+            go: '1.16.x'
             quicktest: true
             racequicktest: true
 
-          - job_name: go1.16
+          - job_name: go1.17
             os: ubuntu-latest
-            go: '1.16.x'
+            go: '1.17.x'
             quicktest: true
             racequicktest: true
 
     name: ${{ matrix.job_name }}
 
     runs-on: ${{ matrix.os }}
@@ -110,6 +110,7 @@ jobs:
         with:
           stable: 'false'
           go-version: ${{ matrix.go }}
+          check-latest: true
 
     - name: Set environment variables
       shell: bash
@@ -245,14 +246,14 @@ jobs:
           fetch-depth: 0
 
-      - name: Set up Go 1.16
+      # Upgrade together with NDK version
+      - name: Set up Go
         uses: actions/setup-go@v1
         with:
-          go-version: 1.16
+          go-version: 1.18.x
 
+      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
       - name: Force NDK version
-        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;22.1.7171670" | grep -v = || true
+        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
 
       - name: Go module cache
         uses: actions/cache@v2
@@ -273,8 +274,8 @@ jobs:
 
       - name: install gomobile
         run: |
-          go get golang.org/x/mobile/cmd/gobind
-          go get golang.org/x/mobile/cmd/gomobile
+          go install golang.org/x/mobile/cmd/gobind@latest
+          go install golang.org/x/mobile/cmd/gomobile@latest
           env PATH=$PATH:~/go/bin gomobile init
 
       - name: arm-v7a gomobile build
@@ -283,7 +284,7 @@ jobs:
       - name: arm-v7a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm' >> $GITHUB_ENV
@@ -296,7 +297,7 @@ jobs:
       - name: arm64-v8a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -309,7 +310,7 @@ jobs:
       - name: x86 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=386' >> $GITHUB_ENV
@@ -322,7 +323,7 @@ jobs:
       - name: x64 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/22.1.7171670/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
          echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
          echo 'GOOS=android' >> $GITHUB_ENV
          echo 'GOARCH=amd64' >> $GITHUB_ENV
```
5267 MANUAL.html (generated): file diff suppressed because it is too large

7550 MANUAL.txt (generated): file diff suppressed because it is too large
```diff
@@ -28,6 +28,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
   * Box [:page_facing_up:](https://rclone.org/box/)
   * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+  * China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
+  * Arvan Cloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
   * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
   * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
   * Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
@@ -41,6 +43,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
   * HTTP [:page_facing_up:](https://rclone.org/http/)
   * Hubic [:page_facing_up:](https://rclone.org/hubic/)
+  * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
   * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
   * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
   * Koofr [:page_facing_up:](https://rclone.org/koofr/)
```
```diff
@@ -22,6 +22,7 @@ import (
 	_ "github.com/rclone/rclone/backend/hdfs"
 	_ "github.com/rclone/rclone/backend/http"
 	_ "github.com/rclone/rclone/backend/hubic"
+	_ "github.com/rclone/rclone/backend/internetarchive"
 	_ "github.com/rclone/rclone/backend/jottacloud"
 	_ "github.com/rclone/rclone/backend/koofr"
 	_ "github.com/rclone/rclone/backend/local"
```
```diff
@@ -43,8 +43,9 @@ import (
 const (
 	minSleep         = 10 * time.Millisecond
 	maxSleep         = 10 * time.Second
-	decayConstant    = 1    // bigger for slower decay, exponential
-	maxListChunkSize = 5000 // number of items to read at once
+	decayConstant    = 1     // bigger for slower decay, exponential
+	maxListChunkSize = 5000  // number of items to read at once
+	maxUploadParts   = 50000 // maximum allowed number of parts/blocks in a multi-part upload
 	modTimeKey       = "mtime"
 	timeFormatIn     = time.RFC3339
 	timeFormatOut    = "2006-01-02T15:04:05.000000000Z07:00"
@@ -612,7 +613,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
 	case opt.UseMSI:
 		var token adal.Token
-		var userMSI *userMSI = &userMSI{}
+		var userMSI = &userMSI{}
 		if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 {
 			// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
 			// Validate and ensure exactly one is set. (To do: better validation.)
@@ -1689,8 +1690,25 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		}
 	}
 
+	// calculate size of parts/blocks
+	partSize := int(o.fs.opt.ChunkSize)
+
+	uploadParts := int64(maxUploadParts)
+	if uploadParts < 1 {
+		uploadParts = 1
+	} else if uploadParts > maxUploadParts {
+		uploadParts = maxUploadParts
+	}
+
+	// Adjust partSize until the number of parts/blocks is small enough.
+	if o.size/int64(partSize) >= uploadParts {
+		// Calculate partition size rounded up to the nearest MiB
+		partSize = int((((o.size / uploadParts) >> 20) + 1) << 20)
+		fs.Debugf(o, "Adjust partSize to %q", partSize)
+	}
+
 	putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
-		BufferSize:      int(o.fs.opt.ChunkSize),
+		BufferSize:      partSize,
 		MaxBuffers:      o.fs.opt.UploadConcurrency,
 		Metadata:        o.meta,
 		BlobHTTPHeaders: httpHeaders,
@@ -1758,7 +1776,7 @@ func (o *Object) SetTier(tier string) error {
 	blob := o.getBlobReference()
 	ctx := context.Background()
 	err := o.fs.pacer.Call(func() (bool, error) {
-		_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{})
+		_, err := blob.SetTier(ctx, desiredAccessTier, azblob.LeaseAccessConditions{}, azblob.RehydratePriorityNone)
 		return o.fs.shouldRetry(ctx, err)
 	})
```
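The interesting change in this hunk is the part-size arithmetic: rather than always streaming with the configured chunk size, the upload now caps the number of blocks at maxUploadParts and, when a file would otherwise need too many blocks, rounds the part size up to the next whole MiB. A standalone sketch of the same calculation with made-up sizes (an illustration of the arithmetic, not the backend's code):

```go
package main

import "fmt"

const maxUploadParts = 50000 // limit on blocks per blob, as in the diff above

func main() {
	partSize := int64(4 << 20) // assume a 4 MiB default chunk size
	size := int64(1 << 40)     // hypothetical 1 TiB object

	if size/partSize >= maxUploadParts {
		// round (size / maxUploadParts) up to the nearest MiB
		partSize = ((size/maxUploadParts)>>20 + 1) << 20
	}
	fmt.Println(partSize) // 22020096 bytes = 21 MiB, keeping the blob under 50000 blocks
}
```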
6 backend/cache/cache.go (vendored)
```diff
@@ -394,7 +394,11 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 		notifiedRemotes: make(map[string]bool),
 	}
 	cache.PinUntilFinalized(f.Fs, f)
-	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
+	rps := rate.Inf
+	if opt.Rps > 0 {
+		rps = rate.Limit(float64(opt.Rps))
+	}
+	f.rateLimiter = rate.NewLimiter(rps, opt.TotalWorkers)
 
 	f.plexConnector = &plexConnector{}
 	if opt.PlexURL != "" {
```
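The guard matters because golang.org/x/time/rate documents a zero Limit as allowing no events, whereas an unset rps should mean "no limit", which is what rate.Inf expresses. A minimal sketch of the corrected construction (assumes golang.org/x/time/rate; not the backend code itself):

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

// newLimiter mirrors the guard in the diff: rps <= 0 means "unlimited",
// because rate.Limit(0) would allow no events at all.
func newLimiter(rps, burst int) *rate.Limiter {
	limit := rate.Inf
	if rps > 0 {
		limit = rate.Limit(float64(rps))
	}
	return rate.NewLimiter(limit, burst)
}

func main() {
	l := newLimiter(0, 4)  // previously this built rate.NewLimiter(0, 4)
	fmt.Println(l.Allow()) // true: with rate.Inf the limiter never throttles
}
```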
```diff
@@ -1650,13 +1650,37 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 	}
 
 	chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
+	skip := int64(0)
 	err = o.fs.pacer.Call(func() (bool, error) {
 		// seek to the start in case this is a retry
-		if _, err = chunk.Seek(0, io.SeekStart); err != nil {
-			return false, nil
+		if _, err = chunk.Seek(skip, io.SeekStart); err != nil {
+			return false, err
 		}
 		err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
 		// after session is started, we retry everything
+		if err != nil {
+			// Check for incorrect offset error and retry with new offset
+			if uErr, ok := err.(files.UploadSessionAppendV2APIError); ok {
+				if uErr.EndpointError != nil && uErr.EndpointError.IncorrectOffset != nil {
+					correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
+					delta := int64(correctOffset) - int64(cursor.Offset)
+					skip += delta
+					what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
+					if skip < 0 {
+						return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
+					} else if skip == chunkSize {
+						fs.Debugf(o, "%s: chunk received OK - continuing", what)
+						return false, nil
+					} else if skip > chunkSize {
+						// This error should never happen
+						return false, fmt.Errorf("can't seek forwards by more than a chunk to correct offset: %s", what)
+					}
+					// Skip the sent data on next retry
+					cursor.Offset = uint64(int64(cursor.Offset) + delta)
+					fs.Debugf(o, "%s: skipping bytes on retry to fix offset", what)
+				}
+			}
+		}
 		return err != nil, err
 	})
 	if err != nil {
@@ -1760,7 +1784,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		entry, err = o.uploadChunked(ctx, in, commitInfo, size)
 	} else {
 		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-			entry, err = o.fs.srv.Upload(commitInfo, in)
+			entry, err = o.fs.srv.Upload(&files.UploadArg{CommitInfo: *commitInfo}, in)
 			return shouldRetry(ctx, err)
 		})
 	}
```
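The retry logic leans on Dropbox reporting the offset it actually has: the client computes the delta between what it sent and what the server wants, then seeks that far into the buffered chunk instead of restarting the session. A toy walk-through of the bookkeeping with invented numbers (the real code lives in uploadChunked above):

```go
package main

import "fmt"

func main() {
	const chunkSize = int64(1024)
	sent, correct := int64(1000), int64(1400) // hypothetical: server already has 400 more bytes
	skip := correct - sent
	switch {
	case skip < 0:
		fmt.Println("fatal: would have to seek backwards")
	case skip == chunkSize:
		fmt.Println("whole chunk already arrived, continue with the next one")
	case skip > chunkSize:
		fmt.Println("fatal: server is more than a chunk ahead, should never happen")
	default:
		fmt.Printf("seek %d bytes into the chunk and resend the rest\n", skip) // seek 400 bytes
	}
}
```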
```diff
@@ -295,6 +295,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 			Value: "DURABLE_REDUCED_AVAILABILITY",
 			Help:  "Durable reduced availability storage class",
 		}},
+	}, {
+		Name: "no_check_bucket",
+		Help: `If set, don't attempt to check the bucket exists or create it.
+
+This can be useful when trying to minimise the number of transactions
+rclone does if you know the bucket exists already.
+`,
+		Default:  false,
+		Advanced: true,
 	}, {
 		Name: config.ConfigEncoding,
 		Help: config.ConfigEncodingHelp,
@@ -317,6 +326,7 @@ type Options struct {
 	BucketPolicyOnly bool                 `config:"bucket_policy_only"`
 	Location         string               `config:"location"`
 	StorageClass     string               `config:"storage_class"`
+	NoCheckBucket    bool                 `config:"no_check_bucket"`
 	Enc              encoder.MultiEncoder `config:"encoding"`
 }
@@ -482,7 +492,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		name:  name,
 		root:  root,
 		opt:   *opt,
-		pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
+		pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
 		cache: bucket.NewCache(),
 	}
 	f.setRoot(root)
@@ -840,6 +850,14 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
 	}, nil)
 }
 
+// checkBucket creates the bucket if it doesn't exist unless NoCheckBucket is true
+func (f *Fs) checkBucket(ctx context.Context, bucket string) error {
+	if f.opt.NoCheckBucket {
+		return nil
+	}
+	return f.makeBucket(ctx, bucket)
+}
+
 // Rmdir deletes the bucket if the fs is at the root
 //
 // Returns an error if it isn't empty: Error 409: The bucket you tried
@@ -873,7 +891,7 @@ func (f *Fs) Precision() time.Duration {
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	dstBucket, dstPath := f.split(remote)
-	err := f.makeBucket(ctx, dstBucket)
+	err := f.checkBucket(ctx, dstBucket)
 	if err != nil {
 		return nil, err
 	}
@@ -1123,7 +1141,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	bucket, bucketPath := o.split()
-	err := o.fs.makeBucket(ctx, bucket)
+	err := o.fs.checkBucket(ctx, bucket)
 	if err != nil {
 		return err
 	}
```
```diff
@@ -428,7 +428,8 @@ type Object struct {
 }
 
 // Wrap base object into hasher object
-func (f *Fs) wrapObject(o fs.Object, err error) (*Object, error) {
+func (f *Fs) wrapObject(o fs.Object, err error) (obj fs.Object, outErr error) {
+	// log.Trace(o, "err=%v", err)("obj=%#v, outErr=%v", &obj, &outErr)
 	if err != nil {
 		return nil, err
 	}
@@ -184,7 +184,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (r io.ReadC
 // Put data into the remote path with given modTime and size
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	var (
-		o      *Object
+		o      fs.Object
 		common hash.Set
 		rehash bool
 		hashes hashMap
@@ -224,7 +224,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 		}
 	}
 	if len(hashes) > 0 {
-		err := o.putHashes(ctx, hashes)
+		err := o.(*Object).putHashes(ctx, hashes)
 		fs.Debugf(o, "Applied %d source hashes, err: %v", len(hashes), err)
 	}
 	return o, err
```
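A likely motivation for changing wrapObject to return the fs.Object interface rather than *Object is Go's typed-nil pitfall: a nil *Object assigned into an interface is not equal to nil, so a caller's nil check silently passes. An illustration of the language rule itself (standalone, unrelated to the hasher types):

```go
package main

import "fmt"

type ifc interface{ M() }

type impl struct{}

func (*impl) M() {}

func get() *impl { return nil } // returns a typed nil pointer

func main() {
	var i ifc = get()     // i holds (type *impl, value nil)
	fmt.Println(i == nil) // false: an interface is nil only if type AND value are both nil
}
```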
1132 backend/internetarchive/internetarchive.go (new file): file diff suppressed because it is too large

17 backend/internetarchive/internetarchive_test.go (new file)
```diff
@@ -0,0 +1,17 @@
+// Test internetarchive filesystem interface
+package internetarchive_test
+
+import (
+	"testing"
+
+	"github.com/rclone/rclone/backend/internetarchive"
+	"github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestIA:lesmi-rclone-test/",
+		NilObject:  (*internetarchive.Object)(nil),
+	})
+}
```
```diff
@@ -8,42 +8,69 @@ import (
 )
 
 const (
-	// default time format for almost all request and responses
-	timeFormat = "2006-01-02-T15:04:05Z0700"
-	// the API server seems to use a different format
-	apiTimeFormat = "2006-01-02T15:04:05Z07:00"
+	// default time format historically used for all request and responses.
+	// Similar to time.RFC3339, but with an extra '-' in front of 'T',
+	// and no ':' separator in timezone offset. Some newer endpoints have
+	// moved to proper time.RFC3339 conformant format instead.
+	jottaTimeFormat = "2006-01-02-T15:04:05Z0700"
 )
 
-// Time represents time values in the Jottacloud API. It uses a custom RFC3339 like format.
-type Time time.Time
-
-// UnmarshalXML turns XML into a Time
-func (t *Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+// unmarshalXML turns XML into a Time
+func unmarshalXMLTime(d *xml.Decoder, start xml.StartElement, timeFormat string) (time.Time, error) {
 	var v string
 	if err := d.DecodeElement(&v, &start); err != nil {
-		return err
+		return time.Time{}, err
 	}
 	if v == "" {
-		*t = Time(time.Time{})
-		return nil
+		return time.Time{}, nil
 	}
 	newTime, err := time.Parse(timeFormat, v)
 	if err == nil {
-		*t = Time(newTime)
+		return newTime, nil
 	}
-	return err
+	return time.Time{}, err
 }
 
+// JottaTime represents time values in the classic API using a custom RFC3339 like format
+type JottaTime time.Time
+
+// String returns JottaTime string in Jottacloud classic format
+func (t JottaTime) String() string { return time.Time(t).Format(jottaTimeFormat) }
+
+// UnmarshalXML turns XML into a JottaTime
+func (t *JottaTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	tm, err := unmarshalXMLTime(d, start, jottaTimeFormat)
+	*t = JottaTime(tm)
+	return err
+}
+
-// MarshalXML turns a Time into XML
-func (t *Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+// MarshalXML turns a JottaTime into XML
+func (t *JottaTime) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 	return e.EncodeElement(t.String(), start)
 }
 
-// Return Time string in Jottacloud format
-func (t Time) String() string { return time.Time(t).Format(timeFormat) }
+// Rfc3339Time represents time values in the newer APIs using standard RFC3339 format
+type Rfc3339Time time.Time
 
-// APIString returns Time string in Jottacloud API format
-func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
+// String returns Rfc3339Time string in Jottacloud RFC3339 format
+func (t Rfc3339Time) String() string { return time.Time(t).Format(time.RFC3339) }
+
+// UnmarshalXML turns XML into a Rfc3339Time
+func (t *Rfc3339Time) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	tm, err := unmarshalXMLTime(d, start, time.RFC3339)
+	*t = Rfc3339Time(tm)
+	return err
+}
+
+// MarshalXML turns a Rfc3339Time into XML
+func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	return e.EncodeElement(t.String(), start)
+}
+
+// MarshalJSON turns a Rfc3339Time into JSON
+func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
+}
+
 // LoginToken is struct representing the login token generated in the WebUI
 type LoginToken struct {
@@ -122,16 +149,11 @@ type AllocateFileResponse struct {
 
 // UploadResponse after an upload
 type UploadResponse struct {
-	Name      string      `json:"name"`
-	Path      string      `json:"path"`
-	Kind      string      `json:"kind"`
-	ContentID string      `json:"content_id"`
-	Bytes     int64       `json:"bytes"`
-	Md5       string      `json:"md5"`
-	Created   int64       `json:"created"`
-	Modified  int64       `json:"modified"`
-	Deleted   interface{} `json:"deleted"`
-	Mime      string      `json:"mime"`
+	Path      string `json:"path"`
+	ContentID string `json:"content_id"`
+	Bytes     int64  `json:"bytes"`
+	Md5       string `json:"md5"`
+	Modified  int64  `json:"modified"`
 }
 
 // DeviceRegistrationResponse is the response to registering a device
@@ -338,9 +360,9 @@ type JottaFolder struct {
 	Name       string        `xml:"name,attr"`
 	Deleted    Flag          `xml:"deleted,attr"`
 	Path       string        `xml:"path"`
-	CreatedAt  Time          `xml:"created"`
-	ModifiedAt Time          `xml:"modified"`
-	Updated    Time          `xml:"updated"`
+	CreatedAt  JottaTime     `xml:"created"`
+	ModifiedAt JottaTime     `xml:"modified"`
+	Updated    JottaTime     `xml:"updated"`
 	Folders    []JottaFolder `xml:"folders>folder"`
 	Files      []JottaFile   `xml:"files>file"`
 }
@@ -365,17 +387,17 @@ GET http://www.jottacloud.com/JFS/<account>/<device>/<mountpoint>/.../<file>
 // JottaFile represents a Jottacloud file
 type JottaFile struct {
 	XMLName         xml.Name
-	Name            string `xml:"name,attr"`
-	Deleted         Flag   `xml:"deleted,attr"`
-	PublicURI       string `xml:"publicURI"`
-	PublicSharePath string `xml:"publicSharePath"`
-	State           string `xml:"currentRevision>state"`
-	CreatedAt       Time   `xml:"currentRevision>created"`
-	ModifiedAt      Time   `xml:"currentRevision>modified"`
-	Updated         Time   `xml:"currentRevision>updated"`
-	Size            int64  `xml:"currentRevision>size"`
-	MimeType        string `xml:"currentRevision>mime"`
-	MD5             string `xml:"currentRevision>md5"`
+	Name            string    `xml:"name,attr"`
+	Deleted         Flag      `xml:"deleted,attr"`
+	PublicURI       string    `xml:"publicURI"`
+	PublicSharePath string    `xml:"publicSharePath"`
+	State           string    `xml:"currentRevision>state"`
+	CreatedAt       JottaTime `xml:"currentRevision>created"`
+	ModifiedAt      JottaTime `xml:"currentRevision>modified"`
+	Updated         JottaTime `xml:"currentRevision>updated"`
+	Size            int64     `xml:"currentRevision>size"`
+	MimeType        string    `xml:"currentRevision>mime"`
+	MD5             string    `xml:"currentRevision>md5"`
 }
 
 // Error is a custom Error for wrapping Jottacloud error responses
```
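The two layouts differ only in punctuation: the classic Jottacloud format inserts '-' before 'T' and drops the ':' from the zone offset, which is exactly what the two Go reference layouts encode. A quick standalone check with made-up timestamps:

```go
package main

import (
	"fmt"
	"time"
)

const jottaTimeFormat = "2006-01-02-T15:04:05Z0700" // classic layout from the diff

func main() {
	classic, err := time.Parse(jottaTimeFormat, "2022-04-15-T10:30:00+0200")
	fmt.Println(classic, err) // parses: note the -T and the +0200 offset

	modern, err := time.Parse(time.RFC3339, "2022-04-15T10:30:00+02:00")
	fmt.Println(modern, err) // standard RFC 3339 form of the same instant
}
```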
```diff
@@ -519,7 +519,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
 	values.Set("client_id", defaultClientID)
 	values.Set("grant_type", "password")
 	values.Set("password", loginToken.AuthToken)
-	values.Set("scope", "offline_access+openid")
+	values.Set("scope", "openid offline_access")
 	values.Set("username", loginToken.Username)
 	values.Encode()
 	opts = rest.Opts{
@@ -932,25 +932,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	return entries, nil
 }
 
-type listStreamTime time.Time
-
-func (c *listStreamTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
-	var v string
-	if err := d.DecodeElement(&v, &start); err != nil {
-		return err
-	}
-	t, err := time.Parse(time.RFC3339, v)
-	if err != nil {
-		return err
-	}
-	*c = listStreamTime(t)
-	return nil
-}
-
-func (c listStreamTime) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf("\"%s\"", time.Time(c).Format(time.RFC3339))), nil
-}
-
 func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, filesystem *Fs, callback func(fs.DirEntry) error) error {
 
 	type stats struct {
@@ -960,12 +941,12 @@ func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, files
 	var expected, actual stats
 
 	type xmlFile struct {
-		Path     string         `xml:"path"`
-		Name     string         `xml:"filename"`
-		Checksum string         `xml:"md5"`
-		Size     int64          `xml:"size"`
-		Modified listStreamTime `xml:"modified"`
-		Created  listStreamTime `xml:"created"`
+		Path     string          `xml:"path"`
+		Name     string          `xml:"filename"`
+		Checksum string          `xml:"md5"`
+		Size     int64           `xml:"size"`
+		Modified api.Rfc3339Time `xml:"modified"` // Note: Liststream response includes 3 decimal milliseconds, but we ignore them since there is second precision everywhere else
+		Created  api.Rfc3339Time `xml:"created"`
 	}
 
 	type xmlFolder struct {
@@ -1210,6 +1191,45 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.purgeCheck(ctx, dir, false)
 }
 
+// createOrUpdate tries to make remote file match without uploading.
+// If the remote file exists, and has matching size and md5, only
+// timestamps are updated. If the file does not exist or does does
+// not match size and md5, but matching content can be constructed
+// from deduplication, the file will be updated/created. If the file
+// is currently in trash, but can be made to match, it will be
+// restored. Returns ErrorObjectNotFound if upload will be necessary
+// to get a matching remote file.
+func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
+	opts := rest.Opts{
+		Method:       "POST",
+		Path:         f.filePath(file),
+		Parameters:   url.Values{},
+		ExtraHeaders: make(map[string]string),
+	}
+
+	opts.Parameters.Set("cphash", "true")
+
+	fileDate := api.JottaTime(modTime).String()
+	opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
+	opts.ExtraHeaders["JMd5"] = md5
+	opts.ExtraHeaders["JCreated"] = fileDate
+	opts.ExtraHeaders["JModified"] = fileDate
+
+	var resp *http.Response
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
+		return shouldRetry(ctx, resp, err)
+	})
+
+	if apiErr, ok := err.(*api.Error); ok {
+		// does not exist, i.e. not matching size and md5, and not possible to make it by deduplication
+		if apiErr.StatusCode == http.StatusNotFound {
+			return nil, fs.ErrorObjectNotFound
+		}
+	}
+	return info, nil
+}
+
 // copyOrMoves copies or moves directories or files depending on the method parameter
 func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *api.JottaFile, err error) {
 	opts := rest.Opts{
@@ -1253,6 +1273,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
 
+	// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
+	if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
+		fs.Debugf(src, "Server-side copied to trashed destination, restoring")
+		info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
+	}
+
 	if err != nil {
 		return nil, fmt.Errorf("couldn't copy file: %w", err)
 	}
@@ -1554,40 +1580,19 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 		return err
 	}
 
-	// prepare allocate request with existing metadata but changed timestamps
-	var resp *http.Response
-	var options []fs.OpenOption
-	opts := rest.Opts{
-		Method:       "POST",
-		Path:         "files/v1/allocate",
-		Options:      options,
-		ExtraHeaders: make(map[string]string),
-	}
-	fileDate := api.Time(modTime).APIString()
-	var request = api.AllocateFileRequest{
-		Bytes:    o.size,
-		Created:  fileDate,
-		Modified: fileDate,
-		Md5:      o.md5,
-		Path:     path.Join(o.fs.opt.Mountpoint, o.fs.opt.Enc.FromStandardPath(path.Join(o.fs.root, o.remote))),
-	}
-
-	// send it
-	var response api.AllocateFileResponse
-	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, &request, &response)
-		return shouldRetry(ctx, resp, err)
-	})
+	// request check/update with existing metadata and new modtime
+	// (note that if size/md5 does not match, the file content will
+	// also be modified if deduplication is possible, i.e. it is
+	// important to use correct/latest values)
+	_, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
 	if err != nil {
+		if err == fs.ErrorObjectNotFound {
+			// file was modified (size/md5 changed) between readMetaData and createOrUpdate?
+			return errors.New("metadata did not match")
+		}
 		return err
 	}
 
-	// check response
-	if response.State != "COMPLETED" {
-		// could be the file was modified (size/md5 changed) between readMetaData and the allocate request
-		return errors.New("metadata did not match")
-	}
-
 	// update local metadata
 	o.modTime = modTime
 	return nil
@@ -1725,7 +1730,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		Options:      options,
 		ExtraHeaders: make(map[string]string),
 	}
-	fileDate := api.Time(src.ModTime(ctx)).APIString()
+	fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()
 
 	// the allocate request
 	var request = api.AllocateFileRequest{
```
```diff
@@ -65,7 +65,7 @@ HTTP is provided primarily for debugging purposes.`,
 			Name: "host",
 			Help: `Domain+path of NetStorage host to connect to.
 
-Format should be <domain>/<internal folders>`,
+Format should be ` + "`<domain>/<internal folders>`",
 			Required: true,
 		}, {
 			Name: "account",
@@ -94,7 +94,7 @@ files stored in any sub-directories that may exist.`,
 		Long: `The desired path location (including applicable sub-directories) ending in
 the object that will be the target of the symlink (for example, /links/mylink).
 Include the file extension for the object, if applicable.
-rclone backend symlink <src> <path>`,
+` + "`rclone backend symlink <src> <path>`",
 	},
 }
```
```diff
@@ -4,16 +4,21 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"strconv"
+	"time"
 
 	"github.com/putdotio/go-putio/putio"
 	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/lib/pacer"
 )
 
-func checkStatusCode(resp *http.Response, expected int) error {
-	if resp.StatusCode != expected {
-		return &statusCodeError{response: resp}
+func checkStatusCode(resp *http.Response, expected ...int) error {
+	for _, code := range expected {
+		if resp.StatusCode == code {
+			return nil
+		}
 	}
-	return nil
+	return &statusCodeError{response: resp}
 }
 
 type statusCodeError struct {
@@ -24,8 +29,10 @@ func (e *statusCodeError) Error() string {
 	return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
 }
 
+// This method is called from fserrors.ShouldRetry() to determine if an error should be retried.
+// Some errors (e.g. 429 Too Many Requests) are handled before this step, so they are not included here.
 func (e *statusCodeError) Temporary() bool {
-	return e.response.StatusCode == 429 || e.response.StatusCode >= 500
+	return e.response.StatusCode >= 500
 }
 
 // shouldRetry returns a boolean as to whether this err deserves to be
@@ -40,6 +47,16 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 	if perr, ok := err.(*putio.ErrorResponse); ok {
 		err = &statusCodeError{response: perr.Response}
 	}
+	if scerr, ok := err.(*statusCodeError); ok && scerr.response.StatusCode == 429 {
+		delay := defaultRateLimitSleep
+		header := scerr.response.Header.Get("x-ratelimit-reset")
+		if header != "" {
+			if resetTime, cerr := strconv.ParseInt(header, 10, 64); cerr == nil {
+				delay = time.Until(time.Unix(resetTime+1, 0))
+			}
+		}
+		return true, pacer.RetryAfterError(scerr, delay)
+	}
 	if fserrors.ShouldRetry(err) {
 		return true, err
 	}
```
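The conversion from header to sleep is worth spelling out: x-ratelimit-reset carries a Unix timestamp in seconds, and the code sleeps until one second past it, falling back to defaultRateLimitSleep when the header is missing or unparseable. The same arithmetic in isolation, with a made-up header value:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	header := "1650000000"     // hypothetical x-ratelimit-reset value (Unix seconds)
	delay := 60 * time.Second  // fallback, like defaultRateLimitSleep in the diff
	if resetTime, err := strconv.ParseInt(header, 10, 64); err == nil {
		// sleep until one second past the advertised reset time
		delay = time.Until(time.Unix(resetTime+1, 0))
	}
	fmt.Println(delay)
}
```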
```diff
@@ -302,8 +302,8 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
 		if err != nil {
 			return false, err
 		}
-		if resp.StatusCode != 201 {
-			return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode)
+		if err := checkStatusCode(resp, 201); err != nil {
+			return shouldRetry(ctx, err)
 		}
 		location = resp.Header.Get("location")
 		if location == "" {
```
```diff
@@ -241,7 +241,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		}
 		// fs.Debugf(o, "opening file: id=%d", o.file.ID)
 		resp, err = o.fs.httpClient.Do(req)
-		return shouldRetry(ctx, err)
+		if err != nil {
+			return shouldRetry(ctx, err)
+		}
+		if err := checkStatusCode(resp, 200, 206); err != nil {
+			return shouldRetry(ctx, err)
+		}
+		return false, nil
 	})
 	if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 {
 		_ = resp.Body.Close()
```
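Accepting both 200 and 206 follows HTTP range semantics: a server that honors a Range header answers 206 Partial Content, while one that ignores it may legitimately answer 200 with the full body; both must count as success before handing the body to the reader. A generic net/http illustration with a hypothetical URL (not the backend code):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/file", nil) // hypothetical URL
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=100-") // ask for everything from byte 100
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// 206: server honored the range; 200: server sent the whole body instead
	fmt.Println(resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusPartialContent)
}
```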
```diff
@@ -33,8 +33,9 @@ const (
 	rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8"
 	minSleep                   = 10 * time.Millisecond
 	maxSleep                   = 2 * time.Second
-	decayConstant              = 2 // bigger for slower decay, exponential
+	decayConstant              = 1 // bigger for slower decay, exponential
 	defaultChunkSize           = 48 * fs.Mebi
+	defaultRateLimitSleep      = 60 * time.Second
 )
 
 var (
```
307 backend/s3/s3.go
```diff
@@ -58,7 +58,7 @@ import (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "s3",
-		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, RackCorp, SeaweedFS, and Tencent COS",
+		Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, ArvanCloud, Digital Ocean, Dreamhost, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
 		Options: []fs.Option{{
@@ -75,6 +75,12 @@ func init() {
 		}, {
 			Value: "Ceph",
 			Help:  "Ceph Object Storage",
+		}, {
+			Value: "ChinaMobile",
+			Help:  "China Mobile Ecloud Elastic Object Storage (EOS)",
+		}, {
+			Value: "ArvanCloud",
+			Help:  "Arvan Cloud Object Storage (AOS)",
 		}, {
 			Value: "DigitalOcean",
 			Help:  "Digital Ocean Spaces",
@@ -85,7 +91,7 @@ func init() {
 			Value: "IBMCOS",
 			Help:  "IBM COS S3",
 		}, {
-			Value: "Lyve",
+			Value: "LyveCloud",
 			Help:  "Seagate Lyve Cloud",
 		}, {
 			Value: "Minio",
@@ -294,7 +300,7 @@ func init() {
 		}, {
 			Name:     "region",
 			Help:     "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
-			Provider: "!AWS,Alibaba,RackCorp,Scaleway,Storj,TencentCOS",
+			Provider: "!AWS,Alibaba,ChinaMobile,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS",
 			Examples: []fs.OptionExample{{
 				Value: "",
 				Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -306,6 +312,114 @@ func init() {
 			Name:     "endpoint",
 			Help:     "Endpoint for S3 API.\n\nLeave blank if using AWS to use the default endpoint for the region.",
 			Provider: "AWS",
+		}, {
+			// ChinaMobile endpoints: https://ecloud.10086.cn/op-help-center/doc/article/24534
+			Name:     "endpoint",
+			Help:     "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.",
+			Provider: "ChinaMobile",
+			Examples: []fs.OptionExample{{
+				Value: "eos-wuxi-1.cmecloud.cn",
+				Help:  "The default endpoint - a good choice if you are unsure.\nEast China (Suzhou)",
+			}, {
+				Value: "eos-jinan-1.cmecloud.cn",
+				Help:  "East China (Jinan)",
+			}, {
+				Value: "eos-ningbo-1.cmecloud.cn",
+				Help:  "East China (Hangzhou)",
+			}, {
+				Value: "eos-shanghai-1.cmecloud.cn",
+				Help:  "East China (Shanghai-1)",
+			}, {
+				Value: "eos-zhengzhou-1.cmecloud.cn",
+				Help:  "Central China (Zhengzhou)",
+			}, {
+				Value: "eos-hunan-1.cmecloud.cn",
+				Help:  "Central China (Changsha-1)",
+			}, {
+				Value: "eos-zhuzhou-1.cmecloud.cn",
+				Help:  "Central China (Changsha-2)",
+			}, {
+				Value: "eos-guangzhou-1.cmecloud.cn",
+				Help:  "South China (Guangzhou-2)",
+			}, {
+				Value: "eos-dongguan-1.cmecloud.cn",
+				Help:  "South China (Guangzhou-3)",
+			}, {
+				Value: "eos-beijing-1.cmecloud.cn",
+				Help:  "North China (Beijing-1)",
+			}, {
+				Value: "eos-beijing-2.cmecloud.cn",
+				Help:  "North China (Beijing-2)",
+			}, {
+				Value: "eos-beijing-4.cmecloud.cn",
+				Help:  "North China (Beijing-3)",
+			}, {
+				Value: "eos-huhehaote-1.cmecloud.cn",
+				Help:  "North China (Huhehaote)",
+			}, {
+				Value: "eos-chengdu-1.cmecloud.cn",
+				Help:  "Southwest China (Chengdu)",
+			}, {
+				Value: "eos-chongqing-1.cmecloud.cn",
+				Help:  "Southwest China (Chongqing)",
+			}, {
+				Value: "eos-guiyang-1.cmecloud.cn",
+				Help:  "Southwest China (Guiyang)",
+			}, {
+				Value: "eos-xian-1.cmecloud.cn",
+				Help:  "Nouthwest China (Xian)",
+			}, {
+				Value: "eos-yunnan.cmecloud.cn",
+				Help:  "Yunnan China (Kunming)",
+			}, {
+				Value: "eos-yunnan-2.cmecloud.cn",
+				Help:  "Yunnan China (Kunming-2)",
+			}, {
+				Value: "eos-tianjin-1.cmecloud.cn",
+				Help:  "Tianjin China (Tianjin)",
+			}, {
+				Value: "eos-jilin-1.cmecloud.cn",
+				Help:  "Jilin China (Changchun)",
+			}, {
+				Value: "eos-hubei-1.cmecloud.cn",
+				Help:  "Hubei China (Xiangyan)",
+			}, {
+				Value: "eos-jiangxi-1.cmecloud.cn",
+				Help:  "Jiangxi China (Nanchang)",
+			}, {
+				Value: "eos-gansu-1.cmecloud.cn",
+				Help:  "Gansu China (Lanzhou)",
+			}, {
+				Value: "eos-shanxi-1.cmecloud.cn",
+				Help:  "Shanxi China (Taiyuan)",
+			}, {
+				Value: "eos-liaoning-1.cmecloud.cn",
+				Help:  "Liaoning China (Shenyang)",
+			}, {
+				Value: "eos-hebei-1.cmecloud.cn",
+				Help:  "Hebei China (Shijiazhuang)",
+			}, {
+				Value: "eos-fujian-1.cmecloud.cn",
+				Help:  "Fujian China (Xiamen)",
+			}, {
+				Value: "eos-guangxi-1.cmecloud.cn",
+				Help:  "Guangxi China (Nanning)",
+			}, {
+				Value: "eos-anhui-1.cmecloud.cn",
+				Help:  "Anhui China (Huainan)",
+			}},
+		}, {
+			// ArvanCloud endpoints: https://www.arvancloud.com/en/products/cloud-storage
+			Name:     "endpoint",
+			Help:     "Endpoint for Arvan Cloud Object Storage (AOS) API.",
+			Provider: "ArvanCloud",
+			Examples: []fs.OptionExample{{
+				Value: "s3.ir-thr-at1.arvanstorage.com",
+				Help:  "The default endpoint - a good choice if you are unsure.\nTehran Iran (Asiatech)",
+			}, {
+				Value: "s3.ir-tbz-sh1.arvanstorage.com",
+				Help:  "Tabriz Iran (Shahriar)",
+			}},
 		}, {
 			Name:     "endpoint",
 			Help:     "Endpoint for IBM COS S3 API.\n\nSpecify if using an IBM COS On Premise.",
@@ -746,7 +860,7 @@ func init() {
 		}, {
 			Name:     "endpoint",
 			Help:     "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-			Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath,Storj,RackCorp",
+			Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
 			Examples: []fs.OptionExample{{
 				Value:    "objects-us-east-1.dream.io",
 				Help:     "Dream Objects endpoint",
@@ -769,8 +883,16 @@ func init() {
 				Provider: "SeaweedFS",
 			}, {
 				Value:    "s3.us-east-1.lyvecloud.seagate.com",
-				Help:     "Seagate Lyve Cloud US East 1",
-				Provider: "Lyve",
+				Help:     "Seagate Lyve Cloud US East 1 (Virginia)",
+				Provider: "LyveCloud",
+			}, {
+				Value:    "s3.us-west-1.lyvecloud.seagate.com",
+				Help:     "Seagate Lyve Cloud US West 1 (California)",
+				Provider: "LyveCloud",
+			}, {
+				Value:    "s3.ap-southeast-1.lyvecloud.seagate.com",
+				Help:     "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
+				Provider: "LyveCloud",
 			}, {
 				Value:    "s3.wasabisys.com",
 				Help:     "Wasabi US East endpoint",
@@ -791,6 +913,10 @@ func init() {
 				Value:    "s3.ap-northeast-2.wasabisys.com",
 				Help:     "Wasabi AP Northeast 2 (Osaka) endpoint",
 				Provider: "Wasabi",
+			}, {
+				Value:    "s3.ir-thr-at1.arvanstorage.com",
+				Help:     "ArvanCloud Tehran Iran (Asiatech) endpoint",
+				Provider: "ArvanCloud",
 			}},
 		}, {
 			Name: "location_constraint",
@@ -872,6 +998,112 @@ func init() {
 				Value: "us-gov-west-1",
 				Help:  "AWS GovCloud (US) Region",
 			}},
+		}, {
+			Name:     "location_constraint",
+			Help:     "Location constraint - must match endpoint.\n\nUsed when creating buckets only.",
+			Provider: "ChinaMobile",
+			Examples: []fs.OptionExample{{
+				Value: "wuxi1",
+				Help:  "East China (Suzhou)",
+			}, {
+				Value: "jinan1",
+				Help:  "East China (Jinan)",
+			}, {
+				Value: "ningbo1",
+				Help:  "East China (Hangzhou)",
+			}, {
+				Value: "shanghai1",
+				Help:  "East China (Shanghai-1)",
+			}, {
+				Value: "zhengzhou1",
+				Help:  "Central China (Zhengzhou)",
+			}, {
+				Value: "hunan1",
+				Help:  "Central China (Changsha-1)",
+			}, {
+				Value: "zhuzhou1",
+				Help:  "Central China (Changsha-2)",
+			}, {
+				Value: "guangzhou1",
+				Help:  "South China (Guangzhou-2)",
+			}, {
+				Value: "dongguan1",
+				Help:  "South China (Guangzhou-3)",
+			}, {
+				Value: "beijing1",
+				Help:  "North China (Beijing-1)",
+			}, {
+				Value: "beijing2",
+				Help:  "North China (Beijing-2)",
+			}, {
+				Value: "beijing4",
+				Help:  "North China (Beijing-3)",
+			}, {
+				Value: "huhehaote1",
+				Help:  "North China (Huhehaote)",
+			}, {
+				Value: "chengdu1",
+				Help:  "Southwest China (Chengdu)",
+			}, {
+				Value: "chongqing1",
+				Help:  "Southwest China (Chongqing)",
+			}, {
+				Value: "guiyang1",
+				Help:  "Southwest China (Guiyang)",
+			}, {
+				Value: "xian1",
+				Help:  "Nouthwest China (Xian)",
+			}, {
+				Value: "yunnan",
+				Help:  "Yunnan China (Kunming)",
+			}, {
+				Value: "yunnan2",
+				Help:  "Yunnan China (Kunming-2)",
+			}, {
+				Value: "tianjin1",
+				Help:  "Tianjin China (Tianjin)",
+			}, {
+				Value: "jilin1",
+				Help:  "Jilin China (Changchun)",
+			}, {
+				Value: "hubei1",
+				Help:  "Hubei China (Xiangyan)",
+			}, {
+				Value: "jiangxi1",
+				Help:  "Jiangxi China (Nanchang)",
+			}, {
+				Value: "gansu1",
+				Help:  "Gansu China (Lanzhou)",
+			}, {
+				Value: "shanxi1",
+				Help:  "Shanxi China (Taiyuan)",
+			}, {
+				Value: "liaoning1",
+				Help:  "Liaoning China (Shenyang)",
+			}, {
+				Value: "hebei1",
+				Help:  "Hebei China (Shijiazhuang)",
+			}, {
+				Value: "fujian1",
+				Help:  "Fujian China (Xiamen)",
+			}, {
+				Value: "guangxi1",
+				Help:  "Guangxi China (Nanning)",
+			}, {
+				Value: "anhui1",
+				Help:  "Anhui China (Huainan)",
+			}},
+		}, {
+			Name:     "location_constraint",
+			Help:     "Location constraint - must match endpoint.\n\nUsed when creating buckets only.",
+			Provider: "ArvanCloud",
+			Examples: []fs.OptionExample{{
+				Value: "ir-thr-at1",
+				Help:  "Tehran Iran (Asiatech)",
+			}, {
+				Value: "ir-tbz-sh1",
+				Help:  "Tabriz Iran (Shahriar)",
+			}},
 		}, {
 			Name:     "location_constraint",
 			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\n\nFor on-prem COS, do not make a selection from this list, hit enter.",
@@ -1038,7 +1270,7 @@ func init() {
 		}, {
 			Name:     "location_constraint",
 			Help:     "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
-			Provider: "!AWS,IBMCOS,Alibaba,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
+			Provider: "!AWS,IBMCOS,Alibaba,ChinaMobile,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
 		}, {
 			Name: "acl",
 			Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1073,11 +1305,11 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
 		}, {
 			Value:    "bucket-owner-read",
 			Help:     "Object owner gets FULL_CONTROL.\nBucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
-			Provider: "!IBMCOS",
+			Provider: "!IBMCOS,ChinaMobile",
 		}, {
 			Value:    "bucket-owner-full-control",
 			Help:     "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
-			Provider: "!IBMCOS",
+			Provider: "!IBMCOS,ChinaMobile",
 		}, {
 			Value:    "private",
 			Help:     "Owner gets FULL_CONTROL.\nNo one else has access rights (default).\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.",
@@ -1126,7 +1358,7 @@ isn't set then "acl" is used instead.`,
 		}, {
 			Name:     "server_side_encryption",
 			Help:     "The server-side encryption algorithm used when storing this object in S3.",
-			Provider: "AWS,Ceph,Minio",
+			Provider: "AWS,Ceph,ChinaMobile,ArvanCloud,Minio",
 			Examples: []fs.OptionExample{{
 				Value: "",
 				Help:  "None",
@@ -1134,13 +1366,14 @@ isn't set then "acl" is used instead.`,
 				Value: "AES256",
 				Help:  "AES256",
 			}, {
-				Value: "aws:kms",
-				Help:  "aws:kms",
+				Value:    "aws:kms",
+				Help:     "aws:kms",
+				Provider: "!ChinaMobile",
 			}},
 		}, {
 			Name:     "sse_customer_algorithm",
 			Help:     "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.",
-			Provider: "AWS,Ceph,Minio",
+			Provider: "AWS,Ceph,ChinaMobile,ArvanCloud,Minio",
 			Advanced: true,
 			Examples: []fs.OptionExample{{
 				Value: "",
@@ -1152,7 +1385,7 @@ isn't set then "acl" is used instead.`,
 		}, {
 			Name:     "sse_kms_key_id",
 			Help:     "If using KMS ID you must provide the ARN of Key.",
-			Provider: "AWS,Ceph,Minio",
+			Provider: "AWS,Ceph,ArvanCloud,Minio",
 			Examples: []fs.OptionExample{{
 				Value: "",
 				Help:  "None",
@@ -1163,7 +1396,7 @@ isn't set then "acl" is used instead.`,
 		}, {
 			Name:     "sse_customer_key",
 			Help:     "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.",
-			Provider: "AWS,Ceph,Minio",
+			Provider: "AWS,Ceph,ChinaMobile,ArvanCloud,Minio",
 			Advanced: true,
 			Examples: []fs.OptionExample{{
 				Value: "",
@@ -1175,7 +1408,7 @@ isn't set then "acl" is used instead.`,
 
 If you leave it blank, this is calculated automatically from the sse_customer_key provided.
 `,
-			Provider: "AWS,Ceph,Minio",
+			Provider: "AWS,Ceph,ChinaMobile,ArvanCloud,Minio",
 			Advanced: true,
 			Examples: []fs.OptionExample{{
 				Value: "",
@@ -1231,6 +1464,36 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
 				Value: "STANDARD_IA",
 				Help:  "Infrequent access storage mode",
 			}},
+		}, {
+			// Mapping from here: https://ecloud.10086.cn/op-help-center/doc/article/24495
+			Name:     "storage_class",
+			Help:     "The storage class to use when storing new objects in ChinaMobile.",
+			Provider: "ChinaMobile",
+			Examples: []fs.OptionExample{{
+				Value: "",
+				Help:  "Default",
+			}, {
+				Value: "STANDARD",
+				Help:  "Standard storage class",
+			}, {
+				Value: "GLACIER",
+				Help:  "Archive storage mode",
+			}, {
+				Value: "STANDARD_IA",
+				Help:  "Infrequent access storage mode",
+			}},
+		}, {
+			// Mapping from here: https://www.arvancloud.com/en/products/cloud-storage
+			Name:     "storage_class",
+			Help:     "The storage class to use when storing new objects in ArvanCloud.",
+			Provider: "ArvanCloud",
+			Examples: []fs.OptionExample{{
+				Value: "",
+				Help:  "Default",
+			}, {
+				Value: "STANDARD",
+				Help:  "Standard storage class",
+			}},
 		}, {
 			// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
 			Name:     "storage_class",
@@ -1942,6 +2205,14 @@ func setQuirks(opt *Options) {
 		listObjectsV2 = false
 		virtualHostStyle = false
 		urlEncodeListings = false
+	case "ChinaMobile":
+		listObjectsV2 = false
+		virtualHostStyle = false
+		urlEncodeListings = false
+	case "ArvanCloud":
+		listObjectsV2 = false
+		virtualHostStyle = false
+		urlEncodeListings = false
 	case "DigitalOcean":
 		urlEncodeListings = false
 	case "Dreamhost":
@@ -1951,8 +2222,8 @@ func setQuirks(opt *Options) {
 		virtualHostStyle = false
 		urlEncodeListings = false
 		useMultipartEtag = false // untested
-	case "Lyve":
-		useMultipartEtag = false // Lyve seems to calculate multipart Etags differently from AWS
+	case "LyveCloud":
+		useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
 	case "Minio":
 		virtualHostStyle = false
 	case "Netease":
```
```diff
@@ -159,6 +159,7 @@
 	_ fs.Fs          = &Fs{}
 	_ fs.ListRer     = &Fs{}
 	_ fs.PutStreamer = &Fs{}
+	_ fs.Mover       = &Fs{}
 )
 
 // NewFs creates a filesystem backed by Storj.
@@ -679,3 +680,43 @@ func newPrefix(prefix string) string {
 
 	return prefix + "/"
 }
+
+// Move src to this remote using server-side move operations.
+//
+// This is stored with the remote path given
+//
+// It returns the destination Object and a possible error
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantMove
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't move - not same remote type")
+		return nil, fs.ErrorCantMove
+	}
+
+	// Move parameters
+	srcBucket, srcKey := bucket.Split(srcObj.absolute)
+	dstBucket, dstKey := f.absolute(remote)
+	options := uplink.MoveObjectOptions{}
+
+	// Do the move
+	err := f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
+	if err != nil {
+		// Make sure destination bucket exists
+		_, err := f.project.EnsureBucket(ctx, dstBucket)
+		if err != nil {
+			return nil, fmt.Errorf("rename object failed to create destination bucket: %w", err)
+		}
+		// And try again
+		err = f.project.MoveObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
+		if err != nil {
+			return nil, fmt.Errorf("rename object failed: %w", err)
+		}
+	}
+
+	// Read the new object
+	return f.NewObject(ctx, remote)
+}
```
```diff
@@ -454,7 +454,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err != nil {
 		return nil, err
 	}
-	f.srv.SetHeader("Referer", u.String())
+	if !f.findHeader(opt.Headers, "Referer") {
+		f.srv.SetHeader("Referer", u.String())
+	}
 
 	if root != "" && !rootIsDir {
 		// Check to see if the root actually an existing file
@@ -517,6 +519,17 @@ func (f *Fs) addHeaders(headers fs.CommaSepList) {
 	}
 }
 
+// Returns true if the header was configured
+func (f *Fs) findHeader(headers fs.CommaSepList, find string) bool {
+	for i := 0; i < len(headers); i += 2 {
+		key := f.opt.Headers[i]
+		if strings.EqualFold(key, find) {
+			return true
+		}
+	}
+	return false
+}
+
 // fetch the bearer token and set it if successful
 func (f *Fs) fetchAndSetBearerToken() error {
 	if f.opt.BearerTokenCommand == "" {
```
```diff
@@ -28,7 +28,6 @@ import (
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/oauth2"
-	"golang.org/x/sync/errgroup"
 )
 
 //oAuth
@@ -1074,7 +1073,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	return resp.Body, err
 }
 
-func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string, options ...fs.OpenOption) (err error) {
 	// prepare upload
 	var resp *http.Response
 	var ur api.AsyncInfo
@@ -1088,29 +1087,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, src f
 	opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
 	opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))
 
-	// Check to see if we can calculate a MD5 and SHA256 hash and
-	// if so start calculating them to do de-dupe the uploads.
-	var (
-		hashes    = src.Fs().Hashes()
-		size      = src.Size()
-		dedupe    = size >= 0 && hashes.Contains(hash.MD5) && hashes.Contains(hash.SHA256)
-		g         *errgroup.Group
-		gCtx      context.Context
-		md5sum    string
-		sha256sum string
-	)
-	if dedupe {
-		g, gCtx = errgroup.WithContext(ctx)
-		g.Go(func() (err error) {
-			md5sum, err = src.Hash(gCtx, hash.MD5)
-			return err
-		})
-		g.Go(func() (err error) {
-			sha256sum, err = src.Hash(gCtx, hash.SHA256)
-			return err
-		})
-	}
-
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &ur)
 		return shouldRetry(ctx, resp, err)
@@ -1122,27 +1098,11 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, src f
 
 	// perform the actual upload
 	opts = rest.Opts{
-		RootURL:      ur.HRef,
-		Method:       "PUT",
-		ContentType:  fs.MimeType(ctx, src),
-		Body:         in,
-		ExtraHeaders: map[string]string{},
-		NoResponse:   true,
-	}
-	if size >= 0 {
-		opts.ContentLength = &size
-	}
-
-	// Add the hashes to the PUT to dedupe the upload if possible
-	if dedupe {
-		err = g.Wait()
-		if err != nil {
-			fs.Debugf(o, "failed to calculate MD5 or SHA256: %v", err)
-		} else {
-			opts.ExtraHeaders["Expect"] = "100-continue"
-			opts.ExtraHeaders["Etag"] = md5sum
-			opts.ExtraHeaders["Sha256"] = sha256sum
-		}
+		RootURL:     ur.HRef,
+		Method:      "PUT",
+		ContentType: mimeType,
+		Body:        in,
+		NoResponse:  true,
 	}
 
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1170,7 +1130,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 
 	//upload file
-	err = o.upload(ctx, in1, true, src, options...)
+	err = o.upload(ctx, in1, true, fs.MimeType(ctx, src), options...)
 	if err != nil {
 		return err
 	}
```
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
docker build -t rclone/xgo-cgofuse https://github.com/billziss-gh/cgofuse.git
|
||||
docker build -t rclone/xgo-cgofuse https://github.com/winfsp/cgofuse.git
|
||||
docker images
|
||||
docker push rclone/xgo-cgofuse
|
||||
|
||||
@@ -24,6 +24,7 @@ docs = [
    "overview.md",
    "flags.md",
    "docker.md",
    "bisync.md",

    # Keep these alphabetical by full name
    "fichier.md",
@@ -47,6 +48,7 @@ docs = [
    "hdfs.md",
    "http.md",
    "hubic.md",
    "internetarchive.md",
    "jottacloud.md",
    "koofr.md",
    "mailru.md",
@@ -66,6 +68,7 @@ docs = [
    "sftp.md",
    "storj.md",
    "sugarsync.md",
    "tardigrade.md", # stub only to redirect to storj.md
    "uptobox.md",
    "union.md",
    "webdav.md",
@@ -13,7 +13,7 @@ import (
	"sync/atomic"
	"time"

	"github.com/billziss-gh/cgofuse/fuse"
	"github.com/winfsp/cgofuse/fuse"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"

@@ -18,7 +18,7 @@ import (
	"sync/atomic"
	"time"

	"github.com/billziss-gh/cgofuse/fuse"
	"github.com/winfsp/cgofuse/fuse"
	"github.com/rclone/rclone/cmd/mountlib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/atexit"

@@ -65,10 +65,10 @@ at all, then 1 PiB is set as both the total and the free size.
To run rclone @ on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).

[WinFsp](https://github.com/billziss-gh/winfsp) is an open-source
[WinFsp](https://github.com/winfsp/winfsp) is an open-source
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
uses combination with [cgofuse](https://github.com/winfsp/cgofuse).
Both of these packages are by Bill Zissimopoulos who was very helpful
during the implementation of rclone @ for Windows.

@@ -218,7 +218,7 @@ from Microsoft's Sysinternals suite, which has option |-s| to start
processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture)).
[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture)).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
@@ -78,6 +78,17 @@ type MountPoint struct {
	ErrChan <-chan error
}

// NewMountPoint makes a new mounting structure
func NewMountPoint(mount MountFn, mountPoint string, f fs.Fs, mountOpt *Options, vfsOpt *vfscommon.Options) *MountPoint {
	return &MountPoint{
		MountFn:    mount,
		MountPoint: mountPoint,
		Fs:         f,
		MountOpt:   *mountOpt,
		VFSOpt:     *vfsOpt,
	}
}

// Global constants
const (
	MaxLeafSize = 1024 // don't pass file names longer than this
@@ -167,14 +178,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
			defer cmd.StartStats()()
		}

		mnt := &MountPoint{
			MountFn:    mount,
			MountPoint: args[1],
			Fs:         cmd.NewFsDir(args),
			MountOpt:   Opt,
			VFSOpt:     vfsflags.Opt,
		}

		mnt := NewMountPoint(mount, args[1], cmd.NewFsDir(args), &Opt, &vfsflags.Opt)
		daemon, err := mnt.Mount()

		// Wait for foreground mount, if any...
@@ -253,6 +257,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
	if err != nil {
		return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
	}
	m.MountedOn = time.Now()
	return nil, nil
}

@@ -10,7 +10,6 @@ import (

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfsflags"
)

@@ -117,23 +116,15 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
		return nil, err
	}

	VFS := vfs.New(fdst, &vfsOpt)
	_, unmountFn, err := mountFn(VFS, mountPoint, &mountOpt)
	mnt := NewMountPoint(mountFn, mountPoint, fdst, &mountOpt, &vfsOpt)
	_, err = mnt.Mount()
	if err != nil {
		log.Printf("mount FAILED: %v", err)
		return nil, err
	}

	// Add mount to list if mount point was successfully created
	liveMounts[mountPoint] = &MountPoint{
		MountPoint: mountPoint,
		MountedOn:  time.Now(),
		MountFn:    mountFn,
		UnmountFn:  unmountFn,
		MountOpt:   mountOpt,
		VFSOpt:     vfsOpt,
		Fs:         fdst,
	}
	liveMounts[mountPoint] = mnt

	fs.Debugf(nil, "Mount for %s created at %s using %s", fdst.String(), mountPoint, mountType)
	return nil, nil
@@ -42,15 +42,31 @@ builds an in memory representation. rclone ncdu can be used during
this scanning phase and you will see it building up the directory
structure as it goes along.

Here are the keys - press '?' to toggle the help on and off
You can interact with the user interface using key presses,
press '?' to toggle the help on and off. The supported keys are:

` + strings.Join(helpText()[1:], "\n ") + `

Listed files/directories may be prefixed by a one-character flag,
some of them combined with a description in brackets at end of line.
These flags have the following meaning:

    e means this is an empty directory, i.e. contains no files (but
      may contain empty subdirectories)
    ~ means this is a directory where some of the files (possibly in
      subdirectories) have unknown size, and therefore the directory
      size may be underestimated (and average size inaccurate, as it
      is average of the files with known sizes).
    . means an error occurred while reading a subdirectory, and
      therefore the directory size may be underestimated (and average
      size inaccurate)
    ! means an error occurred while reading this directory

This is an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
rclone remotes. It is missing lots of features at the moment
but is useful as it stands.

Note that it might take some time to delete big files/folders. The
Note that it might take some time to delete big files/directories. The
UI won't respond in the meantime since the deletion is done synchronously.
`,
	Run: func(command *cobra.Command, args []string) {
@@ -283,9 +299,9 @@ func (u *UI) biggestEntry() (biggest int64) {
		return
	}
	for i := range u.entries {
		size, _, _, _, _, _ := u.d.AttrI(u.sortPerm[i])
		if size > biggest {
			biggest = size
		attrs, _ := u.d.AttrI(u.sortPerm[i])
		if attrs.Size > biggest {
			biggest = attrs.Size
		}
	}
	return
@@ -297,8 +313,8 @@ func (u *UI) hasEmptyDir() bool {
		return false
	}
	for i := range u.entries {
		_, count, isDir, _, _, _ := u.d.AttrI(u.sortPerm[i])
		if isDir && count == 0 {
		attrs, _ := u.d.AttrI(u.sortPerm[i])
		if attrs.IsDir && attrs.Count == 0 {
			return true
		}
	}
@@ -343,9 +359,9 @@ func (u *UI) Draw() error {
			if y >= h-1 {
				break
			}
			size, count, isDir, readable, entriesHaveErrors, err := u.d.AttrI(u.sortPerm[n])
			attrs, err := u.d.AttrI(u.sortPerm[n])
			fg := termbox.ColorWhite
			if entriesHaveErrors {
			if attrs.EntriesHaveErrors {
				fg = termbox.ColorYellow
			}
			if err != nil {
@@ -356,15 +372,19 @@ func (u *UI) Draw() error {
				fg, bg = bg, fg
			}
			mark := ' '
			if isDir {
			if attrs.IsDir {
				mark = '/'
			}
			fileFlag := ' '
			message := ""
			if !readable {
			if !attrs.Readable {
				message = " [not read yet]"
			}
			if entriesHaveErrors {
			if attrs.CountUnknownSize > 0 {
				message = fmt.Sprintf(" [%d of %d files have unknown size, size may be underestimated]", attrs.CountUnknownSize, attrs.Count)
				fileFlag = '~'
			}
			if attrs.EntriesHaveErrors {
				message = " [some subdirectories could not be read, size may be underestimated]"
				fileFlag = '.'
			}
@@ -374,32 +394,29 @@ func (u *UI) Draw() error {
			}
			extras := ""
			if u.showCounts {
				ss := operations.CountStringField(count, u.humanReadable, 9) + " "
				if count > 0 {
				ss := operations.CountStringField(attrs.Count, u.humanReadable, 9) + " "
				if attrs.Count > 0 {
					extras += ss
				} else {
					extras += strings.Repeat(" ", len(ss))
				}
			}
			var averageSize float64
			if count > 0 {
				averageSize = float64(size) / float64(count)
			}
			if u.showDirAverageSize {
				ss := operations.SizeStringField(int64(averageSize), u.humanReadable, 9) + " "
				if averageSize > 0 {
				avg := attrs.AverageSize()
				ss := operations.SizeStringField(int64(avg), u.humanReadable, 9) + " "
				if avg > 0 {
					extras += ss
				} else {
					extras += strings.Repeat(" ", len(ss))
				}
			}
			if showEmptyDir {
				if isDir && count == 0 && fileFlag == ' ' {
				if attrs.IsDir && attrs.Count == 0 && fileFlag == ' ' {
					fileFlag = 'e'
				}
			}
			if u.showGraph {
				bars := (size + perBar/2 - 1) / perBar
				bars := (attrs.Size + perBar/2 - 1) / perBar
				// clip if necessary - only happens during startup
				if bars > 10 {
					bars = 10
@@ -408,7 +425,7 @@ func (u *UI) Draw() error {
			}
			extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
		}
		Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
		Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
		y++
	}
}
@@ -559,14 +576,14 @@ type ncduSort struct {
// Less is part of sort.Interface.
func (ds *ncduSort) Less(i, j int) bool {
	var iAvgSize, jAvgSize float64
	isize, icount, _, _, _, _ := ds.d.AttrI(ds.sortPerm[i])
	jsize, jcount, _, _, _, _ := ds.d.AttrI(ds.sortPerm[j])
	iattrs, _ := ds.d.AttrI(ds.sortPerm[i])
	jattrs, _ := ds.d.AttrI(ds.sortPerm[j])
	iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
	if icount > 0 {
		iAvgSize = float64(isize / icount)
	if iattrs.Count > 0 {
		iAvgSize = iattrs.AverageSize()
	}
	if jcount > 0 {
		jAvgSize = float64(jsize / jcount)
	if jattrs.Count > 0 {
		jAvgSize = jattrs.AverageSize()
	}

	switch {
@@ -575,33 +592,33 @@ func (ds *ncduSort) Less(i, j int) bool {
	case ds.u.sortByName > 0:
		break
	case ds.u.sortBySize < 0:
		if isize != jsize {
			return isize < jsize
		if iattrs.Size != jattrs.Size {
			return iattrs.Size < jattrs.Size
		}
	case ds.u.sortBySize > 0:
		if isize != jsize {
			return isize > jsize
		if iattrs.Size != jattrs.Size {
			return iattrs.Size > jattrs.Size
		}
	case ds.u.sortByCount < 0:
		if icount != jcount {
			return icount < jcount
		if iattrs.Count != jattrs.Count {
			return iattrs.Count < jattrs.Count
		}
	case ds.u.sortByCount > 0:
		if icount != jcount {
			return icount > jcount
		if iattrs.Count != jattrs.Count {
			return iattrs.Count > jattrs.Count
		}
	case ds.u.sortByAverageSize < 0:
		if iAvgSize != jAvgSize {
			return iAvgSize < jAvgSize
		}
		// if avgSize is equal, sort by size
		return isize < jsize
		return iattrs.Size < jattrs.Size
	case ds.u.sortByAverageSize > 0:
		if iAvgSize != jAvgSize {
			return iAvgSize > jAvgSize
		}
		// if avgSize is equal, sort by size
		return isize > jsize
		return iattrs.Size > jattrs.Size
	}
	// if everything equal, sort by name
	return iname < jname
@@ -16,14 +16,42 @@ type Dir struct {
	parent            *Dir
	path              string
	mu                sync.Mutex
	count             int64
	size              int64
	count             int64
	countUnknownSize  int64
	entries           fs.DirEntries
	dirs              map[string]*Dir
	readError         error
	entriesHaveErrors bool
}

// Attrs contains accumulated properties for a directory entry
//
// Files with unknown size are counted separately but also included
// in the total count. They are not included in the size, i.e. treated
// as empty files, which means the size may be underestimated.
type Attrs struct {
	Size              int64
	Count             int64
	CountUnknownSize  int64
	IsDir             bool
	Readable          bool
	EntriesHaveErrors bool
}

// AverageSize calculates average size of files in directory
//
// If there are files with unknown size, this returns the average over
// files with known sizes, which means it may be under- or
// overestimated.
func (a *Attrs) AverageSize() float64 {
	countKnownSize := a.Count - a.CountUnknownSize
	if countKnownSize > 0 {
		return float64(a.Size) / float64(countKnownSize)
	}
	return 0
}
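
A worked example of the semantics above: three files of known size totalling 300 bytes plus one file of unknown size average to 100, not 75, because the unknown-size file is excluded from the denominator:

	a := Attrs{Size: 300, Count: 4, CountUnknownSize: 1}
	fmt.Println(a.AverageSize()) // 100: 300 / (4 - 1)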

// Parent returns the directory above this one
func (d *Dir) Parent() *Dir {
	// no locking needed since these are write once in newDir()
@@ -49,7 +77,13 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
	for _, entry := range entries {
		if o, ok := entry.(fs.Object); ok {
			d.count++
			d.size += o.Size()
			size := o.Size()
			if size < 0 {
				// Some backends may return -1 because size of object is not known
				d.countUnknownSize++
			} else {
				d.size += size
			}
		}
	}
	// Set my directory entry in parent
@@ -62,8 +96,9 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
	// Accumulate counts in parents
	for ; parent != nil; parent = parent.parent {
		parent.mu.Lock()
		parent.count += d.count
		parent.size += d.size
		parent.count += d.count
		parent.countUnknownSize += d.countUnknownSize
		if d.readError != nil {
			parent.entriesHaveErrors = true
		}
@@ -91,17 +126,24 @@ func (d *Dir) Remove(i int) {
// Call with d.mu held
func (d *Dir) remove(i int) {
	size := d.entries[i].Size()
	countUnknownSize := int64(0)
	if size < 0 {
		size = 0
		countUnknownSize = 1
	}
	count := int64(1)

	subDir, ok := d.getDir(i)
	if ok {
		size = subDir.size
		count = subDir.count
		countUnknownSize = subDir.countUnknownSize
		delete(d.dirs, path.Base(subDir.path))
	}

	d.size -= size
	d.count -= count
	d.countUnknownSize -= countUnknownSize
	d.entries = append(d.entries[:i], d.entries[i+1:]...)

	dir := d
@@ -111,6 +153,7 @@ func (d *Dir) remove(i int) {
		parent.dirs[path.Base(dir.path)] = dir
		parent.size -= size
		parent.count -= count
		parent.countUnknownSize -= countUnknownSize
		dir = parent
		parent.mu.Unlock()
	}
@@ -151,19 +194,19 @@ func (d *Dir) Attr() (size int64, count int64) {
}

// AttrI returns the size, count and flags for the i-th directory entry
func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool, entriesHaveErrors bool, err error) {
func (d *Dir) AttrI(i int) (attrs Attrs, err error) {
	d.mu.Lock()
	defer d.mu.Unlock()
	subDir, isDir := d.getDir(i)

	if !isDir {
		return d.entries[i].Size(), 0, false, true, d.entriesHaveErrors, d.readError
		return Attrs{d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError
	}
	if subDir == nil {
		return 0, 0, true, false, false, nil
		return Attrs{0, 0, 0, true, false, false}, nil
	}
	size, count = subDir.Attr()
	size, count := subDir.Attr()
	return Attrs{size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError
}

// Scan the Fs passed in, returning a root directory channel and an

@@ -274,7 +274,6 @@ func (vol *Volume) mount(id string) error {
	if _, err := vol.mnt.Mount(); err != nil {
		return err
	}
	vol.mnt.MountedOn = time.Now()
	vol.mountReqs[id] = nil
	vol.drv.monChan <- false // ask monitor to refresh channels
	return nil
@@ -24,26 +24,51 @@ func init() {
var commandDefinition = &cobra.Command{
	Use:   "size remote:path",
	Short: `Prints the total size and number of objects in remote:path.`,
	Long: `
Counts objects in the path and calculates the total size. Prints the
result to standard output.

By default the output is in human-readable format, but shows values in
both human-readable format as well as the raw numbers (global option
` + "`--human-readable`" + ` is not considered). Use option ` + "`--json`" + `
to format output as JSON instead.

Recurses by default, use ` + "`--max-depth 1`" + ` to stop the
recursion.

Some backends do not always provide file sizes, see for example
[Google Photos](/googlephotos/#size) and
[Google Drive](/drive/#limitations-of-google-docs).
Rclone will then show a notice in the log indicating how many such
files were encountered, and count them in as empty files in the output
of the size command.
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
		cmd.Run(false, false, command, func() error {
			var err error
			var results struct {
				Count int64 `json:"count"`
				Bytes int64 `json:"bytes"`
				Count    int64 `json:"count"`
				Bytes    int64 `json:"bytes"`
				Sizeless int64 `json:"sizeless"`
			}

			results.Count, results.Bytes, err = operations.Count(context.Background(), fsrc)
			results.Count, results.Bytes, results.Sizeless, err = operations.Count(context.Background(), fsrc)
			if err != nil {
				return err
			}

			if results.Sizeless > 0 {
				fs.Logf(fsrc, "Size may be underestimated due to %d objects with unknown size", results.Sizeless)
			}
			if jsonOutput {
				return json.NewEncoder(os.Stdout).Encode(results)
			}
			fmt.Printf("Total objects: %s (%d)\n", fs.CountSuffix(results.Count), results.Count)
			fmt.Printf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
			if results.Sizeless > 0 {
				fmt.Printf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
			}
			return nil
		})
	},
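
With --json the results struct above marshals to a single object per run. A hedged standalone sketch using only the standard library (values illustrative):

package main

import (
	"encoding/json"
	"os"
)

func main() {
	// Mirrors the anonymous results struct used by the size command.
	results := struct {
		Count    int64 `json:"count"`
		Bytes    int64 `json:"bytes"`
		Sizeless int64 `json:"sizeless"`
	}{Count: 552, Bytes: 130559, Sizeless: 3}
	// Prints: {"count":552,"bytes":130559,"sizeless":3}
	_ = json.NewEncoder(os.Stdout).Encode(results)
}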

@@ -5,6 +5,7 @@ package makefiles
import (
	"io"
	"log"
	"math"
	"math/rand"
	"os"
	"path/filepath"
@@ -16,7 +17,9 @@ import (
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/readers"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

var (
@@ -29,37 +32,51 @@ var (
	minFileNameLength = 4
	maxFileNameLength = 12
	seed              = int64(1)
	zero              = false
	sparse            = false
	ascii             = false
	pattern           = false
	chargen           = false

	// Globals
	randSource          *rand.Rand
	source              io.Reader
	directoriesToCreate int
	totalDirectories    int
	fileNames           = map[string]struct{}{} // keep a note of which file name we've used already
)

func init() {
	test.Command.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.IntVarP(cmdFlags, &numberOfFiles, "files", "", numberOfFiles, "Number of files to create")
	flags.IntVarP(cmdFlags, &averageFilesPerDirectory, "files-per-directory", "", averageFilesPerDirectory, "Average number of files per directory")
	flags.IntVarP(cmdFlags, &maxDepth, "max-depth", "", maxDepth, "Maximum depth of directory hierarchy")
	flags.FVarP(cmdFlags, &minFileSize, "min-file-size", "", "Minimum size of file to create")
	flags.FVarP(cmdFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create")
	flags.IntVarP(cmdFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names")
	flags.IntVarP(cmdFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names")
	flags.Int64VarP(cmdFlags, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)")
	test.Command.AddCommand(makefilesCmd)
	makefilesFlags := makefilesCmd.Flags()
	flags.IntVarP(makefilesFlags, &numberOfFiles, "files", "", numberOfFiles, "Number of files to create")
	flags.IntVarP(makefilesFlags, &averageFilesPerDirectory, "files-per-directory", "", averageFilesPerDirectory, "Average number of files per directory")
	flags.IntVarP(makefilesFlags, &maxDepth, "max-depth", "", maxDepth, "Maximum depth of directory hierarchy")
	flags.FVarP(makefilesFlags, &minFileSize, "min-file-size", "", "Minimum size of file to create")
	flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create")
	flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names")
	flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names")

	test.Command.AddCommand(makefileCmd)
	makefileFlags := makefileCmd.Flags()

	// Common flags to makefiles and makefile
	for _, f := range []*pflag.FlagSet{makefilesFlags, makefileFlags} {
		flags.Int64VarP(f, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)")
		flags.BoolVarP(f, &zero, "zero", "", zero, "Fill files with ASCII 0x00")
		flags.BoolVarP(f, &sparse, "sparse", "", sparse, "Make the files sparse (appear to be filled with ASCII 0x00)")
		flags.BoolVarP(f, &ascii, "ascii", "", ascii, "Fill files with random ASCII printable bytes only")
		flags.BoolVarP(f, &pattern, "pattern", "", pattern, "Fill files with a periodic pattern")
		flags.BoolVarP(f, &chargen, "chargen", "", chargen, "Fill files with a ASCII chargen pattern")
	}
}

var commandDefinition = &cobra.Command{
var makefilesCmd = &cobra.Command{
	Use:   "makefiles <dir>",
	Short: `Make a random file hierarchy in a directory`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		if seed == 0 {
			seed = time.Now().UnixNano()
			fs.Logf(nil, "Using random seed = %d", seed)
		}
		randSource = rand.New(rand.NewSource(seed))
		commonInit()
		outputDirectory := args[0]
		directoriesToCreate = numberOfFiles / averageFilesPerDirectory
		averageSize := (minFileSize + maxFileSize) / 2
@@ -73,13 +90,130 @@ var commandDefinition = &cobra.Command{
		totalBytes := int64(0)
		for i := 0; i < numberOfFiles; i++ {
			dir := dirs[randSource.Intn(len(dirs))]
			totalBytes += writeFile(dir, fileName())
			size := int64(minFileSize)
			if maxFileSize > minFileSize {
				size += randSource.Int63n(int64(maxFileSize - minFileSize))
			}
			writeFile(dir, fileName(), size)
			totalBytes += size
		}
		dt := time.Since(start)
		fs.Logf(nil, "Written %viB in %v at %viB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
		fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
	},
}

var makefileCmd = &cobra.Command{
	Use:   "makefile <size> [<file>]+ [flags]",
	Short: `Make files with random contents of the size given`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1e6, command, args)
		commonInit()
		var size fs.SizeSuffix
		err := size.Set(args[0])
		if err != nil {
			log.Fatalf("Failed to parse size %q: %v", args[0], err)
		}
		start := time.Now()
		fs.Logf(nil, "Creating %d files of size %v.", len(args[1:]), size)
		totalBytes := int64(0)
		for _, filePath := range args[1:] {
			dir := filepath.Dir(filePath)
			name := filepath.Base(filePath)
			writeFile(dir, name, int64(size))
			totalBytes += int64(size)
		}
		dt := time.Since(start)
		fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
	},
}

func bool2int(b bool) int {
	if b {
		return 1
	}
	return 0
}

// common initialisation for makefiles and makefile
func commonInit() {
	if seed == 0 {
		seed = time.Now().UnixNano()
		fs.Logf(nil, "Using random seed = %d", seed)
	}
	randSource = rand.New(rand.NewSource(seed))
	if bool2int(zero)+bool2int(sparse)+bool2int(ascii)+bool2int(pattern)+bool2int(chargen) > 1 {
		log.Fatal("Can only supply one of --zero, --sparse, --ascii, --pattern or --chargen")
	}
	switch {
	case zero, sparse:
		source = zeroReader{}
	case ascii:
		source = asciiReader{}
	case pattern:
		source = readers.NewPatternReader(math.MaxInt64)
	case chargen:
		source = &chargenReader{}
	default:
		source = randSource
	}
	if minFileSize > maxFileSize {
		maxFileSize = minFileSize
	}
}

type zeroReader struct{}

// Read a chunk of zeroes
func (zeroReader) Read(p []byte) (n int, err error) {
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

type asciiReader struct{}

// Read a chunk of printable ASCII characters
func (asciiReader) Read(p []byte) (n int, err error) {
	n, err = randSource.Read(p)
	for i := range p[:n] {
		p[i] = (p[i] % (0x7F - 0x20)) + 0x20
	}
	return n, err
}
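
(The modulo arithmetic keeps every byte printable: 0x7F - 0x20 = 95 possible values, shifted up by 0x20, so output bytes always land in the range 0x20 to 0x7E.)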

type chargenReader struct {
	start   byte // offset from startChar to start line with
	written byte // chars in line so far
}

// Read a chunk of printable ASCII characters in chargen format
func (r *chargenReader) Read(p []byte) (n int, err error) {
	const (
		startChar    = 0x20 // ' '
		endChar      = 0x7E // '~' inclusive
		charsPerLine = 72
	)
	for i := range p {
		if r.written >= charsPerLine {
			r.start++
			if r.start > endChar-startChar {
				r.start = 0
			}
			p[i] = '\n'
			r.written = 0
		} else {
			c := r.start + r.written + startChar
			if c > endChar {
				c -= endChar - startChar + 1
			}
			p[i] = c
			r.written++
		}
	}
	return len(p), err
}
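
A hedged usage sketch of the reader above: the first line is 72 characters starting at ' ' (0x20), and each subsequent line starts one character later, wrapping after '~':

	var r chargenReader
	buf := make([]byte, 2*(72+1)) // two full lines plus newlines
	_, _ = r.Read(buf)            // the named err is never set, so it is always nil
	fmt.Printf("%s", buf)
	// line 1: ` !"#$%&'()*+,-...` (72 chars from 0x20)
	// line 2 begins at '!', shifted one place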

// fileName creates a unique random file or directory name
func fileName() (name string) {
	for {
@@ -134,7 +268,7 @@ func (d *dir) list(path string, output []string) []string {
}

// writeFile writes a random file at dir/name
func writeFile(dir, name string) int64 {
func writeFile(dir, name string, size int64) {
	err := file.MkdirAll(dir, 0777)
	if err != nil {
		log.Fatalf("Failed to make directory %q: %v", dir, err)
@@ -144,8 +278,11 @@ func writeFile(dir, name string) int64 {
	if err != nil {
		log.Fatalf("Failed to open file %q: %v", path, err)
	}
	size := randSource.Int63n(int64(maxFileSize-minFileSize)) + int64(minFileSize)
	_, err = io.CopyN(fd, randSource, size)
	if sparse {
		err = fd.Truncate(size)
	} else {
		_, err = io.CopyN(fd, source, size)
	}
	if err != nil {
		log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err)
	}
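
The sparse branch relies on the fact that extending a file with Truncate does not write data blocks. A self-contained hedged sketch using only the standard library:

package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.Create("sparse.bin")
	if err != nil {
		log.Fatal(err)
	}
	// Reports 1 GiB of length but allocates (almost) no blocks on
	// filesystems with sparse-file support.
	if err := f.Truncate(1 << 30); err != nil {
		log.Fatal(err)
	}
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}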

@@ -154,5 +291,4 @@ func writeFile(dir, name string) int64 {
		log.Fatalf("Failed to close file %q: %v", path, err)
	}
	fs.Infof(path, "Written file size %v", fs.SizeSuffix(size))
	return size
}

@@ -25,7 +25,7 @@ var commandDefinition = &cobra.Command{
		cmd.Run(false, false, command, func() error {
			ctx := context.Background()
			ci := fs.GetConfig(context.Background())
			objects, _, err := operations.Count(ctx, fsrc)
			objects, _, _, err := operations.Count(ctx, fsrc)
			if err != nil {
				return err
			}

@@ -112,8 +112,10 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Backblaze B2" home="https://www.backblaze.com/b2/cloud-storage.html" config="/b2/" >}}
{{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
{{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
{{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}}
{{< provider name="Arvan Cloud Object Storage (AOS)" home="https://www.arvancloud.com/en/products/cloud-storage" config="/s3/#arvan-cloud-object-storage-aos" >}}
{{< provider name="Citrix ShareFile" home="http://sharefile.com/" config="/sharefile/" >}}
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/sftp/#c14" >}}
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/s3/#scaleway" >}}
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
@@ -126,6 +128,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="HDFS" home="https://hadoop.apache.org/" config="/hdfs/" >}}
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
{{< provider name="Hubic" home="https://hubic.com/" config="/hubic/" >}}
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
@@ -99,9 +99,11 @@ Remote or path to alias.

Can be "myremote:path/to/dir", "myremote:bucket", "myremote:" or "/local/path".

Properties:

- Config: remote
- Env Var: RCLONE_ALIAS_REMOTE
- Type: string
- Default: ""
- Required: true

{{< rem autogenerated options stop >}}

@@ -168,10 +168,12 @@ OAuth Client Id.

Leave blank normally.

Properties:

- Config: client_id
- Env Var: RCLONE_ACD_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --acd-client-secret

@@ -179,10 +181,12 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_ACD_CLIENT_SECRET
- Type: string
- Default: ""
- Required: false

### Advanced options

@@ -192,10 +196,12 @@ Here are the advanced options specific to amazon cloud drive (Amazon Drive).

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_ACD_TOKEN
- Type: string
- Default: ""
- Required: false

#### --acd-auth-url

@@ -203,10 +209,12 @@ Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_ACD_AUTH_URL
- Type: string
- Default: ""
- Required: false

#### --acd-token-url

@@ -214,19 +222,23 @@ Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_ACD_TOKEN_URL
- Type: string
- Default: ""
- Required: false

#### --acd-checkpoint

Checkpoint for internal polling (debug).

Properties:

- Config: checkpoint
- Env Var: RCLONE_ACD_CHECKPOINT
- Type: string
- Default: ""
- Required: false

#### --acd-upload-wait-per-gb

@@ -252,6 +264,8 @@ of big files for a range of file sizes.
Upload with the "-v" flag to see more info about what rclone is doing
in this situation.

Properties:

- Config: upload_wait_per_gb
- Env Var: RCLONE_ACD_UPLOAD_WAIT_PER_GB
- Type: Duration
@@ -270,6 +284,8 @@ To download files above this threshold, rclone requests a "tempLink"
which downloads the file through a temporary URL directly from the
underlying S3 storage.

Properties:

- Config: templink_threshold
- Env Var: RCLONE_ACD_TEMPLINK_THRESHOLD
- Type: SizeSuffix
@@ -277,10 +293,12 @@ underlying S3 storage.

#### --acd-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_ACD_ENCODING
- Type: MultiEncoder

@@ -574,3 +574,12 @@ put them back in again.` >}}
* Márton Elek <elek@apache.org> <elek@users.noreply.github.com>
* Vincent Murphy <vdm@vdm.ie>
* ctrl-q <34975747+ctrl-q@users.noreply.github.com>
* Nil Alexandrov <nalexand@akamai.com>
* GuoXingbin <101376330+guoxingbin@users.noreply.github.com>
* Berkan Teber <berkan@berkanteber.com>
* Tobias Klauser <tklauser@distanz.ch>
* KARBOWSKI Piotr <piotr.karbowski@gmail.com>
* GH <geeklihui@foxmail.com>
* rafma0 <int.main@gmail.com>
* Adrien Rey-Jarthon <jobs@adrienjarthon.com>
* Nick Gooding <73336146+nickgooding@users.noreply.github.com>
@@ -166,10 +166,12 @@ Storage Account Name.

Leave blank to use SAS URL or Emulator.

Properties:

- Config: account
- Env Var: RCLONE_AZUREBLOB_ACCOUNT
- Type: string
- Default: ""
- Required: false

#### --azureblob-service-principal-file

@@ -185,10 +187,12 @@ Leave blank normally. Needed only if you want to use a service principal instead
See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to blob data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.

Properties:

- Config: service_principal_file
- Env Var: RCLONE_AZUREBLOB_SERVICE_PRINCIPAL_FILE
- Type: string
- Default: ""
- Required: false

#### --azureblob-key

@@ -196,10 +200,12 @@ Storage Account Key.

Leave blank to use SAS URL or Emulator.

Properties:

- Config: key
- Env Var: RCLONE_AZUREBLOB_KEY
- Type: string
- Default: ""
- Required: false

#### --azureblob-sas-url

@@ -207,10 +213,12 @@ SAS URL for container level access only.

Leave blank if using account/key or Emulator.

Properties:

- Config: sas_url
- Env Var: RCLONE_AZUREBLOB_SAS_URL
- Type: string
- Default: ""
- Required: false

#### --azureblob-use-msi

@@ -225,6 +233,8 @@ the user-assigned identity will be used by default. If the resource has multiple
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
msi_client_id, or msi_mi_res_id parameters.

Properties:

- Config: use_msi
- Env Var: RCLONE_AZUREBLOB_USE_MSI
- Type: bool
@@ -236,6 +246,8 @@ Uses local storage emulator if provided as 'true'.

Leave blank if using real azure storage endpoint.

Properties:

- Config: use_emulator
- Env Var: RCLONE_AZUREBLOB_USE_EMULATOR
- Type: bool
@@ -251,10 +263,12 @@ Object ID of the user-assigned MSI to use, if any.

Leave blank if msi_client_id or msi_mi_res_id specified.

Properties:

- Config: msi_object_id
- Env Var: RCLONE_AZUREBLOB_MSI_OBJECT_ID
- Type: string
- Default: ""
- Required: false

#### --azureblob-msi-client-id

@@ -262,10 +276,12 @@ Object ID of the user-assigned MSI to use, if any.

Leave blank if msi_object_id or msi_mi_res_id specified.

Properties:

- Config: msi_client_id
- Env Var: RCLONE_AZUREBLOB_MSI_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --azureblob-msi-mi-res-id

@@ -273,10 +289,12 @@ Azure resource ID of the user-assigned MSI to use, if any.

Leave blank if msi_client_id or msi_object_id specified.

Properties:

- Config: msi_mi_res_id
- Env Var: RCLONE_AZUREBLOB_MSI_MI_RES_ID
- Type: string
- Default: ""
- Required: false

#### --azureblob-endpoint

@@ -284,32 +302,65 @@ Endpoint for the service.

Leave blank normally.

Properties:

- Config: endpoint
- Env Var: RCLONE_AZUREBLOB_ENDPOINT
- Type: string
- Default: ""
- Required: false

#### --azureblob-upload-cutoff

Cutoff for switching to chunked upload (<= 256 MiB) (deprecated).

Properties:

- Config: upload_cutoff
- Env Var: RCLONE_AZUREBLOB_UPLOAD_CUTOFF
- Type: string
- Default: ""
- Required: false

#### --azureblob-chunk-size

Upload chunk size (<= 100 MiB).
Upload chunk size.

Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.

Properties:

- Config: chunk_size
- Env Var: RCLONE_AZUREBLOB_CHUNK_SIZE
- Type: SizeSuffix
- Default: 4Mi

#### --azureblob-upload-concurrency

Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.

In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.
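
For example, assuming the default --transfers of 4 together with the defaults shown here (upload concurrency 16 and 4 MiB chunks), that is up to 4 * 16 * 4 MiB = 256 MiB of chunk buffers.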

Properties:

- Config: upload_concurrency
- Env Var: RCLONE_AZUREBLOB_UPLOAD_CONCURRENCY
- Type: int
- Default: 16

#### --azureblob-list-chunk

Size of blob list.
@@ -322,6 +373,8 @@ minutes per megabyte on average, it will time out (
). This can be used to limit the number of blobs items to return, to
avoid the time out.

Properties:

- Config: list_chunk
- Env Var: RCLONE_AZUREBLOB_LIST_CHUNK
- Type: int
@@ -342,10 +395,12 @@ If blobs are in "archive tier" at remote, trying to perform data transfer
operations from remote will not be allowed. User should first restore by
tiering blob to "Hot" or "Cool".

Properties:

- Config: access_tier
- Env Var: RCLONE_AZUREBLOB_ACCESS_TIER
- Type: string
- Default: ""
- Required: false

#### --azureblob-archive-tier-delete

@@ -364,6 +419,8 @@ replacement. This has the potential for data loss if the upload fails
archive tier blobs early may be chargeable.

Properties:

- Config: archive_tier_delete
- Env Var: RCLONE_AZUREBLOB_ARCHIVE_TIER_DELETE
- Type: bool
@@ -378,6 +435,8 @@ uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.

Properties:

- Config: disable_checksum
- Env Var: RCLONE_AZUREBLOB_DISABLE_CHECKSUM
- Type: bool
@@ -390,6 +449,8 @@ How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.

Properties:

- Config: memory_pool_flush_time
- Env Var: RCLONE_AZUREBLOB_MEMORY_POOL_FLUSH_TIME
- Type: Duration
@@ -399,6 +460,8 @@ This option controls how often unused buffers will be removed from the pool.

Whether to use mmap buffers in internal memory pool.

Properties:

- Config: memory_pool_use_mmap
- Env Var: RCLONE_AZUREBLOB_MEMORY_POOL_USE_MMAP
- Type: bool
@@ -406,10 +469,12 @@ Whether to use mmap buffers in internal memory pool.

#### --azureblob-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_AZUREBLOB_ENCODING
- Type: MultiEncoder
@@ -419,10 +484,12 @@ See the [encoding section in the overview](/overview/#encoding) for more info.

Public access level of a container: blob or container.

Properties:

- Config: public_access
- Env Var: RCLONE_AZUREBLOB_PUBLIC_ACCESS
- Type: string
- Default: ""
- Required: false
- Examples:
    - ""
        - The container and its blobs can be accessed only with an authorized request.
@@ -436,6 +503,8 @@ Public access level of a container: blob or container.

If set, do not do HEAD before GET when getting objects.

Properties:

- Config: no_head_object
- Env Var: RCLONE_AZUREBLOB_NO_HEAD_OBJECT
- Type: bool

@@ -329,24 +329,30 @@ Here are the standard options specific to b2 (Backblaze B2).

Account ID or Application Key ID.

Properties:

- Config: account
- Env Var: RCLONE_B2_ACCOUNT
- Type: string
- Default: ""
- Required: true

#### --b2-key

Application Key.

Properties:

- Config: key
- Env Var: RCLONE_B2_KEY
- Type: string
- Default: ""
- Required: true

#### --b2-hard-delete

Permanently delete files on remote removal, otherwise hide files.

Properties:

- Config: hard_delete
- Env Var: RCLONE_B2_HARD_DELETE
- Type: bool
@@ -362,10 +368,12 @@ Endpoint for the service.

Leave blank normally.

Properties:

- Config: endpoint
- Env Var: RCLONE_B2_ENDPOINT
- Type: string
- Default: ""
- Required: false

#### --b2-test-mode

@@ -381,10 +389,12 @@ below will cause b2 to return specific errors:
These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).

Properties:

- Config: test_mode
- Env Var: RCLONE_B2_TEST_MODE
- Type: string
- Default: ""
- Required: false

#### --b2-versions

@@ -393,6 +403,8 @@ Include old versions in directory listings.
Note that when using this no file write operations are permitted,
so you can't upload files or delete them.

Properties:

- Config: versions
- Env Var: RCLONE_B2_VERSIONS
- Type: bool
@@ -406,6 +418,8 @@ Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657 GiB (== 5 GB).

Properties:

- Config: upload_cutoff
- Env Var: RCLONE_B2_UPLOAD_CUTOFF
- Type: SizeSuffix
@@ -420,6 +434,8 @@ copied in chunks of this size.

The minimum is 0 and the maximum is 4.6 GiB.

Properties:

- Config: copy_cutoff
- Env Var: RCLONE_B2_COPY_CUTOFF
- Type: SizeSuffix
@@ -436,6 +452,8 @@ might a maximum of "--transfers" chunks in progress at once.

5,000,000 Bytes is the minimum size.

Properties:

- Config: chunk_size
- Env Var: RCLONE_B2_CHUNK_SIZE
- Type: SizeSuffix
@@ -450,6 +468,8 @@ uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.

Properties:

- Config: disable_checksum
- Env Var: RCLONE_B2_DISABLE_CHECKSUM
- Type: bool
@@ -466,10 +486,20 @@ If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.

The URL provided here SHOULD have the protocol and SHOULD NOT have
a trailing slash or specify the /file/bucket subpath as rclone will
request files with "{download_url}/file/{bucket_name}/{path}".

Example:
> https://mysubdomain.mydomain.tld
(No trailing "/", "file" or "bucket")

Properties:

- Config: download_url
- Env Var: RCLONE_B2_DOWNLOAD_URL
- Type: string
- Default: ""
- Required: false

#### --b2-download-auth-duration

@@ -478,6 +508,8 @@ Time before the authorization token will expire in s or suffix ms|s|m|h|d.
The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.

Properties:

- Config: download_auth_duration
- Env Var: RCLONE_B2_DOWNLOAD_AUTH_DURATION
- Type: Duration
@@ -489,6 +521,8 @@ How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.

Properties:

- Config: memory_pool_flush_time
- Env Var: RCLONE_B2_MEMORY_POOL_FLUSH_TIME
- Type: Duration
@@ -498,6 +532,8 @@ This option controls how often unused buffers will be removed from the pool.

Whether to use mmap buffers in internal memory pool.

Properties:

- Config: memory_pool_use_mmap
- Env Var: RCLONE_B2_MEMORY_POOL_USE_MMAP
- Type: bool
@@ -505,10 +541,12 @@ Whether to use mmap buffers in internal memory pool.

#### --b2-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_B2_ENCODING
- Type: MultiEncoder
@@ -275,10 +275,12 @@ OAuth Client Id.

Leave blank normally.

Properties:

- Config: client_id
- Env Var: RCLONE_BOX_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --box-client-secret

@@ -286,10 +288,12 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_BOX_CLIENT_SECRET
- Type: string
- Default: ""
- Required: false

#### --box-box-config-file

@@ -299,10 +303,12 @@ Leave blank normally.

Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.

Properties:

- Config: box_config_file
- Env Var: RCLONE_BOX_BOX_CONFIG_FILE
- Type: string
- Default: ""
- Required: false

#### --box-access-token

@@ -310,15 +316,19 @@ Box App Primary Access Token

Leave blank normally.

Properties:

- Config: access_token
- Env Var: RCLONE_BOX_ACCESS_TOKEN
- Type: string
- Default: ""
- Required: false

#### --box-box-sub-type


Properties:

- Config: box_sub_type
- Env Var: RCLONE_BOX_BOX_SUB_TYPE
- Type: string
@@ -337,10 +347,12 @@ Here are the advanced options specific to box (Box).

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_BOX_TOKEN
- Type: string
- Default: ""
- Required: false

#### --box-auth-url

@@ -348,10 +360,12 @@ Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_BOX_AUTH_URL
- Type: string
- Default: ""
- Required: false

#### --box-token-url

@@ -359,15 +373,19 @@ Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_BOX_TOKEN_URL
- Type: string
- Default: ""
- Required: false

#### --box-root-folder-id

Fill in for rclone to use a non root folder as its starting point.

Properties:

- Config: root_folder_id
- Env Var: RCLONE_BOX_ROOT_FOLDER_ID
- Type: string
@@ -377,6 +395,8 @@ Fill in for rclone to use a non root folder as its starting point.

Cutoff for switching to multipart upload (>= 50 MiB).

Properties:

- Config: upload_cutoff
- Env Var: RCLONE_BOX_UPLOAD_CUTOFF
- Type: SizeSuffix
@@ -386,6 +406,8 @@ Cutoff for switching to multipart upload (>= 50 MiB).

Max number of times to try committing a multipart file.

Properties:

- Config: commit_retries
- Env Var: RCLONE_BOX_COMMIT_RETRIES
- Type: int
@@ -395,6 +417,8 @@ Max number of times to try committing a multipart file.

Size of listing chunk 1-1000.

Properties:

- Config: list_chunk
- Env Var: RCLONE_BOX_LIST_CHUNK
- Type: int
@@ -404,17 +428,21 @@ Size of listing chunk 1-1000.

Only show items owned by the login (email address) passed in.

Properties:

- Config: owned_by
- Env Var: RCLONE_BOX_OWNED_BY
- Type: string
- Default: ""
- Required: false

#### --box-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_BOX_ENCODING
- Type: MultiEncoder

@@ -316,28 +316,34 @@ Remote to cache.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).

Properties:

- Config: remote
- Env Var: RCLONE_CACHE_REMOTE
- Type: string
- Default: ""
- Required: true

#### --cache-plex-url

The URL of the Plex server.

Properties:

- Config: plex_url
- Env Var: RCLONE_CACHE_PLEX_URL
- Type: string
- Default: ""
- Required: false

#### --cache-plex-username

The username of the Plex user.

Properties:

- Config: plex_username
- Env Var: RCLONE_CACHE_PLEX_USERNAME
- Type: string
- Default: ""
- Required: false

#### --cache-plex-password

@@ -345,10 +351,12 @@ The password of the Plex user.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: plex_password
- Env Var: RCLONE_CACHE_PLEX_PASSWORD
- Type: string
- Default: ""
- Required: false

#### --cache-chunk-size

@@ -358,6 +366,8 @@ Use lower numbers for slower connections. If the chunk size is
changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.

Properties:

- Config: chunk_size
- Env Var: RCLONE_CACHE_CHUNK_SIZE
- Type: SizeSuffix
@@ -376,6 +386,8 @@ How long to cache file structure information (directory listings, file size, tim
If all write operations are done through the cache then you can safely make
this value very large as the cache store will also be updated in real time.

Properties:

- Config: info_age
- Env Var: RCLONE_CACHE_INFO_AGE
- Type: Duration
@@ -395,6 +407,8 @@ The total size that the chunks can take up on the local disk.
If the cache exceeds this value then it will start to delete the
oldest chunks until it goes under this value.

Properties:

- Config: chunk_total_size
- Env Var: RCLONE_CACHE_CHUNK_TOTAL_SIZE
- Type: SizeSuffix
@@ -415,19 +429,23 @@ Here are the advanced options specific to cache (Cache a remote).

The plex token for authentication - auto set normally.

Properties:

- Config: plex_token
- Env Var: RCLONE_CACHE_PLEX_TOKEN
- Type: string
- Default: ""
- Required: false

#### --cache-plex-insecure

Skip all certificate verification when connecting to the Plex server.

Properties:

- Config: plex_insecure
- Env Var: RCLONE_CACHE_PLEX_INSECURE
- Type: string
- Default: ""
- Required: false

#### --cache-db-path

@@ -435,6 +453,8 @@ Directory to store file structure metadata DB.

The remote name is used as the DB file name.

Properties:

- Config: db_path
- Env Var: RCLONE_CACHE_DB_PATH
- Type: string
@@ -451,6 +471,8 @@ This config follows the "--cache-db-path". If you specify a custom
location for "--cache-db-path" and don't specify one for "--cache-chunk-path"
then "--cache-chunk-path" will use the same path as "--cache-db-path".

Properties:

- Config: chunk_path
- Env Var: RCLONE_CACHE_CHUNK_PATH
- Type: string
@@ -460,6 +482,8 @@ then "--cache-chunk-path" will use the same path as "--cache-db-path".

Clear all the cached data for this remote on start.

Properties:

- Config: db_purge
- Env Var: RCLONE_CACHE_DB_PURGE
- Type: bool
@@ -473,6 +497,8 @@ The default value should be ok for most people. If you find that the
cache goes over "cache-chunk-total-size" too often then try to lower
this value to force it to perform cleanups more often.

Properties:

- Config: chunk_clean_interval
- Env Var: RCLONE_CACHE_CHUNK_CLEAN_INTERVAL
- Type: Duration
@@ -490,6 +516,8 @@ cache isn't able to provide file data anymore.
For really slow connections, increase this to a point where the stream is
able to provide data, but your experience will be very stuttery.

Properties:

- Config: read_retries
- Env Var: RCLONE_CACHE_READ_RETRIES
- Type: int
@@ -509,6 +537,8 @@ more fluid and data will be available much more faster to readers.
setting will adapt to the type of reading performed and the value
specified here will be used as a maximum number of workers to use.

Properties:

- Config: workers
- Env Var: RCLONE_CACHE_WORKERS
- Type: int
@@ -531,6 +561,8 @@ If the hardware permits it, use this feature to provide an overall better
performance during streaming but it can also be disabled if RAM is not
available on the local machine.

Properties:

- Config: chunk_no_memory
- Env Var: RCLONE_CACHE_CHUNK_NO_MEMORY
- Type: bool
@@ -556,6 +588,8 @@ useless but it is available to set for more special cases.
other API calls to the cloud provider like directory listings will
still pass.

Properties:

- Config: rps
- Env Var: RCLONE_CACHE_RPS
- Type: int
@@ -569,6 +603,8 @@ If you need to read files immediately after you upload them through
cache you can enable this flag to have their data stored in the
cache store at the same time during upload.

Properties:

- Config: writes
- Env Var: RCLONE_CACHE_WRITES
- Type: bool
@@ -585,10 +621,12 @@ Specifying a value will enable this feature. Without it, it is
completely disabled and files will be uploaded directly to the cloud
provider.

Properties:

- Config: tmp_upload_path
- Env Var: RCLONE_CACHE_TMP_UPLOAD_PATH
- Type: string
- Default: ""
- Required: false

#### --cache-tmp-wait-time

@@ -600,6 +638,8 @@ _cache-tmp-upload-path_ before it is selected for upload.
Note that only one file is uploaded at a time and it can take longer
to start the upload if a queue formed for this purpose.

Properties:

- Config: tmp_wait_time
- Env Var: RCLONE_CACHE_TMP_WAIT_TIME
- Type: Duration
@@ -615,6 +655,8 @@ error.

If you set it to 0 then it will wait forever.

Properties:

- Config: db_wait_time
- Env Var: RCLONE_CACHE_DB_WAIT_TIME
- Type: Duration
@@ -634,7 +676,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).
[backend/command](/rc/#backend-command).

### stats


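Backend commands such as `stats` can be run directly, or through the remote control API of a running instance. A sketch, assuming a cache remote named `cache:`:

```
# Run the cache backend's stats command directly
rclone backend stats cache:

# Or against a running rclone instance via the remote control API
rclone rc backend/command command=stats fs=cache:
```
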
@@ -5,6 +5,138 @@ description: "Rclone Changelog"

# Changelog

## v1.58.0 - 2022-03-18

[See commits](https://github.com/rclone/rclone/compare/v1.57.0...v1.58.0)

* New backends
    * [Akamai Netstorage](/netstorage) (Nil Alexandrov)
    * [Seagate Lyve](/s3/#lyve), [SeaweedFS](/s3/#seaweedfs), [Storj](/s3/#storj), [RackCorp](/s3/#RackCorp) via s3 backend
    * [Storj](/storj/) (renamed from Tardigrade - your old config files will continue working)
* New commands
    * [bisync](/bisync/) - experimental bidirectional cloud sync (Ivan Andreev, Chris Nelson)
* New Features
    * build
        * Add `windows/arm64` build (`rclone mount` not supported yet) (Nick Craig-Wood)
        * Raise minimum go version to go1.15 (Nick Craig-Wood)
    * config: Allow dot in remote names and improve config editing (albertony)
    * dedupe: Add quit as a choice in interactive mode (albertony)
    * dlna: Change icons to the newest ones. (Alain Nussbaumer)
    * filter: Add [`{{ regexp }}` syntax](/filtering/#regexp) to pattern matches (Nick Craig-Wood)
    * fshttp: Add prometheus metrics for HTTP status code (Michał Matczuk)
    * hashsum: Support creating hash from data received on stdin (albertony)
    * librclone
        * Allow empty string or null input instead of empty json object (albertony)
        * Add support for mount commands (albertony)
    * operations: Add server-side moves to stats (Ole Frost)
    * rc: Allow user to disable authentication for web gui (negative0)
    * tree: Remove obsolete `--human` replaced by global `--human-readable` (albertony)
    * version: Report correct friendly-name for newer Windows 10/11 versions (albertony)
* Bug Fixes
    * build
        * Fix ARM architecture version in .deb packages after nfpm change (Nick Craig-Wood)
        * Hard fork `github.com/jlaffaye/ftp` to fix `go get github.com/rclone/rclone` (Nick Craig-Wood)
    * oauthutil: Fix crash when webbrowser requests `/robots.txt` (Nick Craig-Wood)
    * operations: Fix goroutine leak in case of copy retry (Ankur Gupta)
    * rc:
        * Fix `operations/publiclink` default for `expires` parameter (Nick Craig-Wood)
        * Fix missing computation of `transferQueueSize` when summing up statistics group (Carlo Mion)
        * Fix missing `StatsInfo` fields in the computation of the group sum (Carlo Mion)
    * sync: Fix `--max-duration` so it doesn't retry when the duration is exceeded (Nick Craig-Wood)
    * touch: Fix issue where a directory is created instead of a file (albertony)
* Mount
    * Add `--devname` to set the device name sent to FUSE for mount display (Nick Craig-Wood)
* VFS
    * Add `vfs/stats` remote control to show statistics (Nick Craig-Wood)
    * Fix `failed to _ensure cache internal error: downloaders is nil error` (Nick Craig-Wood)
    * Fix handling of special characters in file names (Bumsu Hyeon)
* Local
    * Fix hash invalidation which caused errors with local crypt mount (Nick Craig-Wood)
* Crypt
    * Add `base64` and `base32768` filename encoding options (Max Sum, Sinan Tan)
* Azure Blob
    * Implement `--azureblob-upload-concurrency` parameter to speed uploads (Nick Craig-Wood)
    * Remove 100MB upper limit on `chunk_size` as it is no longer needed (Nick Craig-Wood)
    * Raise `--azureblob-upload-concurrency` to 16 by default (Nick Craig-Wood)
    * Fix crash with SAS URL and no container (Nick Craig-Wood)
* Compress
    * Fix crash if metadata upload failed (Nick Craig-Wood)
    * Fix memory leak (Nick Craig-Wood)
* Drive
    * Added `--drive-copy-shortcut-content` (Abhiraj)
    * Disable OAuth OOB flow (copy a token) due to Google deprecation (Nick Craig-Wood)
        * See [the deprecation note](https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#disallowed-oob).
    * Add `--drive-skip-dangling-shortcuts` flag (Nick Craig-Wood)
    * When using a link type `--drive-export-formats` shows all doc types (Nick Craig-Wood)
* Dropbox
    * Speed up directory listings by specifying 1000 items in a chunk (Nick Craig-Wood)
    * Save an API request when at the root (Nick Craig-Wood)
* Fichier
    * Implemented About functionality (Gourav T)
* FTP
    * Add `--ftp-ask-password` to prompt for password when needed (Borna Butkovic)
* Google Cloud Storage
    * Add missing regions (Nick Craig-Wood)
    * Disable OAuth OOB flow (copy a token) due to Google deprecation (Nick Craig-Wood)
        * See [the deprecation note](https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#disallowed-oob).
* Googlephotos
    * Disable OAuth OOB flow (copy a token) due to Google deprecation (Nick Craig-Wood)
        * See [the deprecation note](https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html#disallowed-oob).
* Hasher
    * Fix crash on object not found (Nick Craig-Wood)
* Hdfs
    * Add file (Move) and directory move (DirMove) support (Andy Jackson)
* HTTP
    * Improved recognition of URL pointing to a single file (albertony)
* Jottacloud
    * Change API used by recursive list (ListR) (Kim)
    * Add support for Tele2 Cloud (Fredric Arklid)
* Koofr
    * Add Digistorage service as a Koofr provider. (jaKa)
* Mailru
    * Fix int32 overflow on arm32 (Ivan Andreev)
* Onedrive
    * Add config option for oauth scope `Sites.Read.All` (Charlie Jiang)
    * Minor optimization of quickxorhash (Isaac Levy)
    * Add `--onedrive-root-folder-id` flag (Nick Craig-Wood)
    * Do not retry on `400 pathIsTooLong` error (ctrl-q)
* Pcloud
    * Add support for recursive list (ListR) (Niels van de Weem)
    * Fix pre-1970 time stamps (Nick Craig-Wood)
* S3
    * Use `ListObjectsV2` for faster listings (Felix Bünemann)
        * Fallback to `ListObject` v1 on unsupported providers (Nick Craig-Wood)
    * Use the `ETag` on multipart transfers to verify the transfer was OK (Nick Craig-Wood)
        * Add `--s3-use-multipart-etag` provider quirk to disable this on unsupported providers (Nick Craig-Wood)
    * New Providers
        * RackCorp object storage (bbabich)
        * Seagate Lyve Cloud storage (Nick Craig-Wood)
        * SeaweedFS (Chris Lu)
        * Storj Shared gateways (Márton Elek, Nick Craig-Wood)
    * Add Wasabi AP Northeast 2 endpoint info (lindwurm)
    * Add `GLACIER_IR` storage class (Yunhai Luo)
    * Document `Content-MD5` workaround for object-lock enabled buckets (Paulo Martins)
    * Fix multipart upload with `--no-head` flag (Nick Craig-Wood)
    * Simplify content length processing in s3 with download url (Logeshwaran Murugesan)
* SFTP
    * Add rclone to list of supported `md5sum`/`sha1sum` commands to look for (albertony)
    * Refactor so we only have one way of running remote commands (Nick Craig-Wood)
    * Fix timeout on hashing large files by sending keepalives (Nick Craig-Wood)
    * Fix unnecessary seeking when uploading and downloading files (Nick Craig-Wood)
    * Update docs on how to create `known_hosts` file (Nick Craig-Wood)
* Storj
    * Rename tardigrade backend to storj backend (Nick Craig-Wood)
    * Implement server side Move for files (Nick Craig-Wood)
    * Update docs to explain differences between s3 and this backend (Elek, Márton)
* Swift
    * Fix About so it shows info about the current container only (Nick Craig-Wood)
* Union
    * Fix treatment of remotes with `//` in (Nick Craig-Wood)
    * Fix deadlock when one part of a multi-upload fails (Nick Craig-Wood)
    * Fix eplus policy returned nil (Vitor Arruda)
* Yandex
    * Add permanent deletion support (deinferno)

## v1.57.0 - 2021-11-01

[See commits](https://github.com/rclone/rclone/compare/v1.56.0...v1.57.0)

@@ -322,15 +322,19 @@ Remote to chunk/unchunk.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).

Properties:

- Config: remote
- Env Var: RCLONE_CHUNKER_REMOTE
- Type: string
- Default: ""
- Required: true

#### --chunker-chunk-size

Files larger than chunk size will be split into chunks.

Properties:

- Config: chunk_size
- Env Var: RCLONE_CHUNKER_CHUNK_SIZE
- Type: SizeSuffix
@@ -342,6 +346,8 @@ Choose how chunker handles hash sums.

All modes but "none" require metadata.

Properties:

- Config: hash_type
- Env Var: RCLONE_CHUNKER_HASH_TYPE
- Type: string
@@ -378,6 +384,8 @@ If chunk number has less digits than the number of hashes, it is left-padded by
If there are more digits in the number, they are left as is.
Possible chunk files are ignored if their name does not match given format.

Properties:

- Config: name_format
- Env Var: RCLONE_CHUNKER_NAME_FORMAT
- Type: string
@@ -389,6 +397,8 @@ Minimum valid chunk number. Usually 0 or 1.

By default chunk numbers start from 1.

Properties:

- Config: start_from
- Env Var: RCLONE_CHUNKER_START_FROM
- Type: int
@@ -401,6 +411,8 @@ Format of the metadata object or "none".
By default "simplejson".
Metadata is a small JSON file named after the composite file.

Properties:

- Config: meta_format
- Env Var: RCLONE_CHUNKER_META_FORMAT
- Type: string
@@ -418,6 +430,8 @@ Metadata is a small JSON file named after the composite file.

Choose how chunker should handle files with missing or invalid chunks.

Properties:

- Config: fail_hard
- Env Var: RCLONE_CHUNKER_FAIL_HARD
- Type: bool
@@ -432,6 +446,8 @@ Choose how chunker should handle files with missing or invalid chunks.

Choose how chunker should handle temporary files during transactions.

Properties:

- Config: transactions
- Env Var: RCLONE_CHUNKER_TRANSACTIONS
- Type: string

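As a sketch of how these options fit together, a hypothetical chunker remote wrapping an existing remote could be created non-interactively (names and values illustrative):

```
rclone config create mychunks chunker remote=myremote:bucket chunk_size=100M hash_type=md5
```
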
@@ -36,7 +36,8 @@ See the [global flags page](/flags/) for global options not listed here.

* [rclone about](/commands/rclone_about/) - Get quota information from the remote.
* [rclone authorize](/commands/rclone_authorize/) - Remote authorization.
* [rclone backend](/commands/rclone_backend/) - Run a backend specific command.
* [rclone backend](/commands/rclone_backend/) - Run a backend-specific command.
* [rclone bisync](/commands/rclone_bisync/) - Perform bidirectional synchronization between two paths.
* [rclone cat](/commands/rclone_cat/) - Concatenates any files and sends them to stdout.
* [rclone check](/commands/rclone_check/) - Checks the files in the source and destination match.
* [rclone checksum](/commands/rclone_checksum/) - Checks the files in the source against a SUM file.

@@ -42,7 +42,7 @@ Applying a `--full` flag to the command prints the bytes in full, e.g.
    Trashed: 104857602
    Other:   8849156022

A `--json` flag generates conveniently computer readable output, e.g.
A `--json` flag generates conveniently machine-readable output, e.g.

    {
        "total": 18253611008,

@@ -1,18 +1,18 @@
---
title: "rclone backend"
description: "Run a backend specific command."
description: "Run a backend-specific command."
slug: rclone_backend
url: /commands/rclone_backend/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/backend/ and as part of making a release run "make commanddocs"
---
# rclone backend

Run a backend specific command.
Run a backend-specific command.

## Synopsis


This runs a backend specific command. The commands themselves (except
This runs a backend-specific command. The commands themselves (except
for "help" and "features") are defined by the backends and you should
see the backend docs for definitions.

@@ -22,7 +22,7 @@ You can discover what commands a backend implements by using
    rclone backend help <backendname>

You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
for more info).

    rclone backend features remote:
@@ -36,7 +36,7 @@ Pass arguments to the backend by placing them on the end of the line
    rclone backend cleanup remote:path file1 file2 file3

Note to run these commands on a running backend then see
[backend/command](/rc/#backend/command) in the rc docs.
[backend/command](/rc/#backend-command) in the rc docs.


```

@@ -13,7 +13,7 @@ Checks the files in the source and destination match.


Checks the files in the source and destination match. It compares
sizes and hashes (MD5 or SHA1) and logs a report of files which don't
sizes and hashes (MD5 or SHA1) and logs a report of files that don't
match. It doesn't alter the source or destination.

If you supply the `--size-only` flag, it will only compare the sizes not

@@ -15,7 +15,7 @@ Create a new remote with name, type and options.
Create a new remote of `name` with `type` and options. The options
should be passed in pairs of `key` `value` or as `key=value`.

For example to make a swift remote of name myremote using auto config
For example, to make a swift remote of name myremote using auto config
you would do:

    rclone config create myremote swift env_auth true
@@ -107,9 +107,8 @@ At the end of the non interactive process, rclone will return a result
with `State` as empty string.

If `--all` is passed then rclone will ask all the config questions,
not just the post config questions. Parameters that are supplied on
the command line or from environment variables are used as defaults
for questions as usual.
not just the post config questions. Any parameters are used as
defaults for questions as usual.

Note that `bin/config.py` in the rclone source implements this protocol
as a readable demonstration.

@@ -16,7 +16,7 @@ Update an existing remote's password. The password
should be passed in pairs of `key` `password` or as `key=password`.
The `password` should be passed in in clear (unobscured).

For example to set password of a remote of name myremote you would do:
For example, to set password of a remote of name myremote you would do:

    rclone config password myremote fieldname mypassword
    rclone config password myremote fieldname=mypassword

@@ -15,7 +15,7 @@ Update options in an existing remote.
Update an existing remote's options. The options should be passed in
pairs of `key` `value` or as `key=value`.

For example to update the env_auth field of a remote of name myremote
For example, to update the env_auth field of a remote of name myremote
you would do:

    rclone config update myremote env_auth true

@@ -32,7 +32,7 @@ name. It will do this iteratively until all the identically named
directories have been merged.

Next, if deduping by name, for every group of duplicate file names /
hashes, it will delete all but one identical files it finds without
hashes, it will delete all but one identical file it finds without
confirmation. This means that for most duplicated files the `dedupe` command will not be interactive.

`dedupe` considers files to be identical if they have the
@@ -43,7 +43,7 @@ identical if they have the same size (any hash will be ignored). This
can be useful on crypt backends which do not support hashes.

Next rclone will resolve the remaining duplicates. Exactly which
action is taken depends on the dedupe mode. By default rclone will
action is taken depends on the dedupe mode. By default, rclone will
interactively query the user for each one.

**Important**: Since this can cause data loss, test first with the
@@ -74,8 +74,7 @@ Now the `dedupe` session
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    q) Quit
    s/k/r/q> k
    s/k/r> k
    Enter the number of the file to keep> 1
    one.txt: Deleted 1 extra copies
    two.txt: Found 3 files with duplicate names
@@ -86,8 +85,7 @@ Now the `dedupe` session
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    q) Quit
    s/k/r/q> r
    s/k/r> r
    two-1.txt: renamed from: two.txt
    two-2.txt: renamed from: two.txt
    two-3.txt: renamed from: two.txt
@@ -112,7 +110,7 @@ Dedupe can be run non interactively using the `--dedupe-mode` flag or by using a
* `--dedupe-mode rename` - removes identical files then renames the rest to be different.
* `--dedupe-mode list` - lists duplicate dirs and files only and changes nothing.

For example to rename all the identically named photos in your Google Photos directory, do
For example, to rename all the identically named photos in your Google Photos directory, do

    rclone dedupe --dedupe-mode rename "drive:Google Photos"

@@ -128,7 +126,7 @@ rclone dedupe [mode] remote:path [flags]

## Options

```
      --by-hash               Find indentical hashes rather than names
      --by-hash               Find identical hashes rather than names
      --dedupe-mode string    Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename (default "interactive")
  -h, --help                  help for dedupe
```

@@ -21,6 +21,11 @@ not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling any hash for any remote.

This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).

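A sketch of the two stdin forms described above:

```
# No remote:path given - rclone reads the data to hash from stdin
printf "hello" | rclone hashsum MD5

# Explicit hyphen as remote:path - equivalent when stdin has data to read
printf "hello" | rclone hashsum MD5 -
```
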
Run without a hash to see the list of all supported hashes, e.g.

    $ rclone hashsum

@@ -34,17 +34,17 @@ There are several related list commands
* `lsf` to list objects and directories in easy to parse format
* `lsjson` to list objects and directories in JSON format

`ls`,`lsl`,`lsd` are designed to be human readable.
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.
`ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable.
`lsjson` is designed to be machine-readable.

Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
Listing a non-existent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).
the bucket-based remotes).


```

@@ -44,17 +44,17 @@ There are several related list commands
* `lsf` to list objects and directories in easy to parse format
* `lsjson` to list objects and directories in JSON format

`ls`,`lsl`,`lsd` are designed to be human readable.
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.
`ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable.
`lsjson` is designed to be machine-readable.

Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
Listing a non-existent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).
the bucket-based remotes).


```

@@ -59,13 +59,13 @@ can be returned as an empty string if it isn't available on the object
the object and "UNSUPPORTED" if that object does not support that hash
type.

For example to emulate the md5sum command you can use
For example, to emulate the md5sum command you can use

    rclone lsf -R --hash MD5 --format hp --separator "  " --files-only .

Eg

    $ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket
    $ rclone lsf -R --hash MD5 --format hp --separator "  " --files-only swift:bucket
    7908e352297f0f530b84a756f188baa3  bevajer5jef
    cd65ac234e6fea5925974a51cdd865cc  canole
    03b5341b4f234b9d984d03ad076bae91  diwogej7
@@ -100,7 +100,7 @@ Eg
Note that the --absolute parameter is useful for making lists of files
to pass to an rclone copy with the --files-from-raw flag.

For example to find all the files modified within one day and copy
For example, to find all the files modified within one day and copy
those only (without traversing the whole directory structure):

    rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
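
The resulting `new_files` list can then be fed back into a copy - a sketch
using the same paths as above:

```
rclone copy --files-from-raw new_files /path/to/local remote:path
```
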
@@ -117,17 +117,17 @@ There are several related list commands
* `lsf` to list objects and directories in easy to parse format
* `lsjson` to list objects and directories in JSON format

`ls`,`lsl`,`lsd` are designed to be human readable.
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.
`ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable.
`lsjson` is designed to be machine-readable.

Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
Listing a non-existent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).
the bucket-based remotes).


```

@@ -66,7 +66,7 @@ If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"
will be "subfolder/file.txt", not "remote:path/subfolder/file.txt".
When used without --recursive the Path will always be the same as Name.

If the directory is a bucket in a bucket based backend, then
If the directory is a bucket in a bucket-based backend, then
"IsBucket" will be set to true. This key won't be present unless it is
"true".

@@ -91,17 +91,17 @@ There are several related list commands
* `lsf` to list objects and directories in easy to parse format
* `lsjson` to list objects and directories in JSON format

`ls`,`lsl`,`lsd` are designed to be human readable.
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.
`ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable.
`lsjson` is designed to be machine-readable.

Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
Listing a non-existent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).
the bucket-based remotes).


```

@@ -34,17 +34,17 @@ There are several related list commands
* `lsf` to list objects and directories in easy to parse format
* `lsjson` to list objects and directories in JSON format

`ls`,`lsl`,`lsd` are designed to be human readable.
`lsf` is designed to be human and machine readable.
`lsjson` is designed to be machine readable.
`ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable.
`lsjson` is designed to be machine-readable.

Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.

The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.

Listing a non existent directory will produce an error except for
Listing a non-existent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket based remotes).
the bucket-based remotes).


```

@@ -20,6 +20,11 @@ not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling MD5 for any remote.

This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).


```
rclone md5sum remote:path [flags]

@@ -75,7 +75,7 @@ at all, then 1 PiB is set as both the total and the free size.
To run rclone mount on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/).

[WinFsp](https://github.com/billziss-gh/winfsp) is an open source
[WinFsp](https://github.com/billziss-gh/winfsp) is an open-source
Windows File System Proxy which makes it easy to write user space file
systems for Windows. It provides a FUSE emulation layer which rclone
uses in combination with [cgofuse](https://github.com/billziss-gh/cgofuse).
@@ -245,7 +245,7 @@ applications won't work with their files on an rclone mount without
`--vfs-cache-mode writes` or `--vfs-cache-mode full`.
See the [VFS File Caching](#vfs-file-caching) section for more info.

The bucket based remotes (e.g. Swift, S3, Google Cloud Storage, B2,
The bucket-based remotes (e.g. Swift, S3, Google Cloud Storage, B2,
Hubic) do not support the concept of empty directories, so empty
directories will have a tendency to disappear once they fall out of
the directory cache.
@@ -689,6 +689,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
      --daemon-wait duration        Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s)
      --debug-fuse                  Debug the FUSE internals - needs -v
      --default-permissions         Makes kernel enforce access control based on the file mode (not supported on Windows)
      --devname string              Set the device name - default is remote:path
      --dir-cache-time duration     Time to cache directory entries for (default 5m0s)
      --dir-perms FileMode          Directory permissions (default 0777)
      --file-perms FileMode         File permissions (default 0666)

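A hypothetical mount invocation using the newly added flag (mount point and device name illustrative):

```
rclone mount remote:path /mnt/data --devname mydata
```
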
@@ -11,7 +11,7 @@ Obscure password for use in the rclone config file.

## Synopsis

In the rclone config file, human readable passwords are
In the rclone config file, human-readable passwords are
obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these
passwords as rclone can decrypt them - it is to prevent "eyedropping"

@@ -349,6 +349,7 @@ rclone serve docker [flags]
      --daemon-wait duration        Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s)
      --debug-fuse                  Debug the FUSE internals - needs -v
      --default-permissions         Makes kernel enforce access control based on the file mode (not supported on Windows)
      --devname string              Set the device name - default is remote:path
      --dir-cache-time duration     Time to cache directory entries for (default 5m0s)
      --dir-perms FileMode          Directory permissions (default 0777)
      --file-perms FileMode         File permissions (default 0666)

@@ -59,6 +59,9 @@ supply --client-ca also.
of that with the CA certificate. --key should be the PEM encoded
private key and --client-ca should be the PEM encoded client
certificate authority certificate.

### Template

--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:

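A sketch of pointing a serving command at a custom template (path illustrative):

```
rclone serve http remote:path --template /path/to/custom.html
```
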
@@ -15,7 +15,7 @@ rclone serve restic implements restic's REST backend API
over HTTP. This allows restic to use rclone as a data storage
mechanism for cloud providers that restic does not support directly.

[Restic](https://restic.net/) is a command line program for doing
[Restic](https://restic.net/) is a command-line program for doing
backups.

The server will log errors. Use -v to see access logs.
@@ -194,7 +194,7 @@ rclone serve restic remote:path [flags]
      --max-header-bytes int             Maximum size of request header (default 4096)
      --pass string                      Password for authentication
      --private-repos                    Users can only access their private repo
      --realm string                     realm for authentication (default "rclone")
      --realm string                     Realm for authentication (default "rclone")
      --server-read-timeout duration     Timeout for server reading data (default 1h0m0s)
      --server-write-timeout duration    Timeout for server writing data (default 1h0m0s)
      --stdio                            Run an HTTP2 server on stdin/stdout

@@ -49,6 +49,17 @@ be used with sshd via ~/.ssh/authorized_keys, for example:

    restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...

On the client you need to set "--transfers 1" when using --stdio.
Otherwise multiple instances of the rclone server are started by OpenSSH
which can lead to "corrupted on transfer" errors. This is the case because
the client chooses indiscriminately which server to send commands to while
the servers all have different views of the state of the filing system.

The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from being
used. Omitting "restrict" and using --sftp-path-override to enable
checksumming is possible but less secure and you could use the SFTP server
provided by OpenSSH in this case.


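On the rclone-client side this could look like the following sketch, assuming `mysftp:` is an sftp remote configured to connect through the sshd entry above:

```
rclone copy --transfers 1 mysftp:photos ./photos
```
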
## VFS - Virtual File System

@@ -341,7 +352,7 @@ together, if `--auth-proxy` is set the authorized keys option will be
ignored.

There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
in the rclone source code.

The program's job is to take a `user` and `pass` on the input and turn

@@ -501,7 +501,7 @@ rclone serve webdav remote:path [flags]
      --pass string                      Password for authentication
      --poll-interval duration           Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
      --read-only                        Mount read-only
      --realm string                     realm for authentication (default "rclone")
      --realm string                     Realm for authentication (default "rclone")
      --server-read-timeout duration     Timeout for server reading data (default 1h0m0s)
      --server-write-timeout duration    Timeout for server writing data (default 1h0m0s)
      --template string                  User-specified template

@@ -20,6 +20,14 @@ not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling SHA-1 for any remote.

This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).

This command can also hash data received on STDIN, if not passing
a remote:path.


```
rclone sha1sum remote:path [flags]

@@ -25,7 +25,7 @@ For example
    └── subdir
        ├── file4
        └── file5


    1 directories, 5 files

You can use any of the filtering options with the tree command (e.g.
@@ -49,7 +49,6 @@ rclone tree remote:path [flags]
      --dirsfirst        List directories before files (-U disables)
      --full-path        Print the full path prefix for each file
  -h, --help             help for tree
      --human            Print the size in a more human readable way.
      --level int        Descend only level directories deep
  -D, --modtime          Print the date of last modification.
      --noindent         Don't print indentation lines

@@ -96,15 +96,19 @@ Here are the standard options specific to compress (Compress a remote).

Remote to compress.

Properties:

- Config: remote
- Env Var: RCLONE_COMPRESS_REMOTE
- Type: string
- Default: ""
- Required: true

#### --compress-mode

Compression mode.

Properties:

- Config: mode
- Env Var: RCLONE_COMPRESS_MODE
- Type: string
@@ -129,6 +133,8 @@ Level -2 uses Huffmann encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.

Properties:

- Config: level
- Env Var: RCLONE_COMPRESS_LEVEL
- Type: int
@@ -143,6 +149,8 @@ it's size.
Files smaller than this limit will be cached in RAM, files larger than
this limit will be cached on disk.

Properties:

- Config: ram_cache_limit
- Env Var: RCLONE_COMPRESS_RAM_CACHE_LIMIT
- Type: SizeSuffix

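A hypothetical compress remote wrapping an existing remote (names illustrative):

```
rclone config create zipped compress remote=myremote:bucket mode=gzip
```
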
@@ -428,15 +428,19 @@ Remote to encrypt/decrypt.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or maybe "myremote:" (not recommended).

Properties:

- Config: remote
- Env Var: RCLONE_CRYPT_REMOTE
- Type: string
- Default: ""
- Required: true

#### --crypt-filename-encryption

How to encrypt the filenames.

Properties:

- Config: filename_encryption
- Env Var: RCLONE_CRYPT_FILENAME_ENCRYPTION
- Type: string
@@ -457,6 +461,8 @@ Option to either encrypt directory names or leave them intact.

NB If filename_encryption is "off" then this option will do nothing.

Properties:

- Config: directory_name_encryption
- Env Var: RCLONE_CRYPT_DIRECTORY_NAME_ENCRYPTION
- Type: bool
@@ -473,10 +479,12 @@ Password or pass phrase for encryption.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: password
- Env Var: RCLONE_CRYPT_PASSWORD
- Type: string
- Default: ""
- Required: true

#### --crypt-password2

@@ -487,10 +495,12 @@ Should be different to the previous password.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: password2
- Env Var: RCLONE_CRYPT_PASSWORD2
- Type: string
- Default: ""
- Required: false

### Advanced options

@@ -509,6 +519,8 @@ pointing to two different directories with the single changed
parameter and use rclone move to move the files between the crypt
remotes.

Properties:

- Config: server_side_across_configs
- Env Var: RCLONE_CRYPT_SERVER_SIDE_ACROSS_CONFIGS
- Type: bool
@@ -526,6 +538,8 @@ This is so you can work out which encrypted names are which decrypted
names just in case you need to do something with the encrypted file
names, or for debugging purposes.

Properties:

- Config: show_mapping
- Env Var: RCLONE_CRYPT_SHOW_MAPPING
- Type: bool
@@ -535,6 +549,8 @@ names, or for debugging purposes.

Option to either encrypt file data or leave it unencrypted.

Properties:

- Config: no_data_encryption
- Env Var: RCLONE_CRYPT_NO_DATA_ENCRYPTION
- Type: bool
@@ -545,6 +561,29 @@ Option to either encrypt file data or leave it unencrypted.
    - "false"
        - Encrypt file data.

#### --crypt-filename-encoding

How to encode the encrypted filename as a text string.

This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote counts the filename
length and whether it is case-sensitive.

Properties:

- Config: filename_encoding
- Env Var: RCLONE_CRYPT_FILENAME_ENCODING
- Type: string
- Default: "base32"
- Examples:
    - "base32"
        - Encode using base32. Suitable for all remotes.
    - "base64"
        - Encode using base64. Suitable for case-sensitive remotes.
    - "base32768"
        - Encode using base32768. Suitable if your remote counts UTF-16 or
          Unicode codepoints instead of UTF-8 byte length. (E.g. OneDrive)

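A sketch of creating such a remote non-interactively (names illustrative; rclone will still ask for the passwords):

```
rclone config create secret crypt remote=myremote:encrypted filename_encoding=base32768
```
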
## Backend commands

Here are the commands specific to the crypt backend.
@@ -559,7 +598,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).
[backend/command](/rc/#backend-command).

### encode


@@ -50,6 +50,7 @@ See the following for detailed instructions for
* [HDFS](/hdfs/)
* [HTTP](/http/)
* [Hubic](/hubic/)
* [Internet Archive](/internetarchive/)
* [Jottacloud](/jottacloud/)
* [Koofr](/koofr/)
* [Mail.ru Cloud](/mailru/)
@@ -939,22 +940,22 @@ unit prefix appended to the value (e.g. `9.762Ki`), while in more textual output
the full unit is shown (e.g. `9.762 KiB`). For counts the SI standard notation is
used, e.g. prefix `k` for kilo. Used with file counts, `1k` means 1000 files.

The various [list](commands/rclone_ls/) commands output raw numbers by default.
The various [list](/commands/rclone_ls/) commands output raw numbers by default.
Option `--human-readable` will make them output values in human-readable format
instead (with the short unit prefix).

The [about](commands/rclone_about/) command outputs human-readable by default,
The [about](/commands/rclone_about/) command outputs human-readable by default,
with a command-specific option `--full` to output the raw numbers instead.

Command [size](commands/rclone_size/) outputs both human-readable and raw numbers
Command [size](/commands/rclone_size/) outputs both human-readable and raw numbers
in the same output.

The [tree](commands/rclone_tree/) command also considers `--human-readable`, but
The [tree](/commands/rclone_tree/) command also considers `--human-readable`, but
it will not use the exact same notation as the other commands: It rounds to one
decimal, and uses single letter suffix, e.g. `K` instead of `Ki`. The reason for
this is that it relies on an external library.

The interactive command [ncdu](commands/rclone_ncdu/) shows human-readable by
The interactive command [ncdu](/commands/rclone_ncdu/) shows human-readable by
default, and responds to key `u` for toggling human-readable format.

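A sketch of the difference for the list commands:

```
rclone ls remote:path                    # raw byte counts
rclone ls --human-readable remote:path   # short unit prefixes such as 9.762Ki
```
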
### --ignore-case-sync ###
@@ -1793,6 +1794,8 @@ of timeouts or bigger if you have lots of bandwidth and a fast remote.

The default is to run 4 file transfers in parallel.

Look at --multi-thread-streams if you would like to control single file transfers.

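For example (value illustrative):

```
rclone copy --transfers 8 source:path dest:path
```
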
### -u, --update ###

This forces rclone to skip any files which exist on the destination

@@ -554,10 +554,12 @@ Setting your own is recommended.
See https://rclone.org/drive/#making-your-own-client-id for how to create your own.
If you leave this blank, it will use an internal key which is low performance.

Properties:

- Config: client_id
- Env Var: RCLONE_DRIVE_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --drive-client-secret

@@ -565,19 +567,23 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_DRIVE_CLIENT_SECRET
- Type: string
- Default: ""
- Required: false

#### --drive-scope

Scope that rclone should use when requesting access from drive.

Properties:

- Config: scope
- Env Var: RCLONE_DRIVE_SCOPE
- Type: string
- Default: ""
- Required: false
- Examples:
    - "drive"
        - Full access all files, excluding Application Data Folder.
@@ -603,10 +609,12 @@ Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.


Properties:

- Config: root_folder_id
- Env Var: RCLONE_DRIVE_ROOT_FOLDER_ID
- Type: string
- Default: ""
- Required: false

#### --drive-service-account-file

@@ -617,15 +625,19 @@ Needed only if you want use SA instead of interactive login.

Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.

Properties:

- Config: service_account_file
- Env Var: RCLONE_DRIVE_SERVICE_ACCOUNT_FILE
- Type: string
- Default: ""
- Required: false

#### --drive-alternate-export

Deprecated: No longer needed.

Properties:

- Config: alternate_export
- Env Var: RCLONE_DRIVE_ALTERNATE_EXPORT
- Type: bool
@@ -639,10 +651,12 @@ Here are the advanced options specific to drive (Google Drive).

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_DRIVE_TOKEN
- Type: string
- Default: ""
- Required: false

#### --drive-auth-url

@@ -650,10 +664,12 @@ Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_DRIVE_AUTH_URL
- Type: string
- Default: ""
- Required: false

#### --drive-token-url

@@ -661,10 +677,12 @@ Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_DRIVE_TOKEN_URL
- Type: string
- Default: ""
- Required: false

#### --drive-service-account-credentials

@@ -673,24 +691,30 @@ Service Account Credentials JSON blob.

Leave blank normally.
Needed only if you want to use SA instead of interactive login.

Properties:

- Config: service_account_credentials
- Env Var: RCLONE_DRIVE_SERVICE_ACCOUNT_CREDENTIALS
- Type: string
- Default: ""
- Required: false

#### --drive-team-drive

ID of the Shared Drive (Team Drive).

Properties:

- Config: team_drive
- Env Var: RCLONE_DRIVE_TEAM_DRIVE
- Type: string
- Default: ""
- Required: false

#### --drive-auth-owner-only

Only consider files owned by the authenticated user.

Properties:

- Config: auth_owner_only
- Env Var: RCLONE_DRIVE_AUTH_OWNER_ONLY
- Type: bool
@@ -703,17 +727,38 @@ Send files to the trash instead of deleting permanently.
Defaults to true, namely sending files to the trash.
Use `--drive-use-trash=false` to delete files permanently instead.

Properties:

- Config: use_trash
- Env Var: RCLONE_DRIVE_USE_TRASH
- Type: bool
- Default: true

#### --drive-copy-shortcut-content

Server side copy contents of shortcuts instead of the shortcut.

When doing server side copies, normally rclone will copy shortcuts as
shortcuts.

If this flag is used then rclone will copy the contents of shortcuts
rather than shortcuts themselves when doing server side copies.

Properties:

- Config: copy_shortcut_content
- Env Var: RCLONE_DRIVE_COPY_SHORTCUT_CONTENT
- Type: bool
- Default: false

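A hypothetical server-side copy that expands shortcuts into their contents:

```
rclone copy --drive-copy-shortcut-content gdrive:src gdrive:dst
```
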
#### --drive-skip-gdocs
|
||||
|
||||
Skip google documents in all listings.
|
||||
|
||||
If given, gdocs practically become invisible to rclone.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: skip_gdocs
|
||||
- Env Var: RCLONE_DRIVE_SKIP_GDOCS
|
||||
- Type: bool
|
||||
@@ -734,6 +779,8 @@ Google photos are identified by being in the "photos" space.
Corrupted checksums are caused by Google modifying the image/video but
not updating the checksum.

Properties:

- Config: skip_checksum_gphotos
- Env Var: RCLONE_DRIVE_SKIP_CHECKSUM_GPHOTOS
- Type: bool

@@ -750,6 +797,8 @@ with you).
This works both with the "list" (lsd, lsl, etc.) and the "copy"
commands (copy, sync, etc.), and with all other commands too.

Properties:

- Config: shared_with_me
- Env Var: RCLONE_DRIVE_SHARED_WITH_ME
- Type: bool

@@ -761,6 +810,8 @@ Only show files that are in the trash.

This will show trashed files in their original directory structure.

Properties:

- Config: trashed_only
- Env Var: RCLONE_DRIVE_TRASHED_ONLY
- Type: bool

@@ -770,6 +821,8 @@ This will show trashed files in their original directory structure.

Only show files that are starred.

Properties:

- Config: starred_only
- Env Var: RCLONE_DRIVE_STARRED_ONLY
- Type: bool

@@ -779,15 +832,19 @@ Only show files that are starred.

Deprecated: See export_formats.

Properties:

- Config: formats
- Env Var: RCLONE_DRIVE_FORMATS
- Type: string
- Default: ""
- Required: false

#### --drive-export-formats

Comma separated list of preferred formats for downloading Google docs.

Properties:

- Config: export_formats
- Env Var: RCLONE_DRIVE_EXPORT_FORMATS
- Type: string
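
As a usage sketch for the export option (the remote name and format list are illustrative, assuming those formats suit the docs in question):

```
# Prefer OpenDocument formats when downloading Google docs
rclone copy gdrive:reports ./reports --drive-export-formats odt,ods,odp
```
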
@@ -797,10 +854,12 @@ Comma separated list of preferred formats for downloading Google docs.

Comma separated list of preferred formats for uploading Google docs.

Properties:

- Config: import_formats
- Env Var: RCLONE_DRIVE_IMPORT_FORMATS
- Type: string
- Default: ""
- Required: false

#### --drive-allow-import-name-change

@@ -808,6 +867,8 @@ Allow the filetype to change when uploading Google docs.

E.g. file.doc to file.docx. This will confuse sync and reupload every time.

Properties:

- Config: allow_import_name_change
- Env Var: RCLONE_DRIVE_ALLOW_IMPORT_NAME_CHANGE
- Type: bool

@@ -833,6 +894,8 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.

Properties:

- Config: use_created_date
- Env Var: RCLONE_DRIVE_USE_CREATED_DATE
- Type: bool

@@ -848,6 +911,8 @@ unexpected consequences when uploading/downloading files.
If both this flag and "--drive-use-created-date" are set, the created
date is used.

Properties:

- Config: use_shared_date
- Env Var: RCLONE_DRIVE_USE_SHARED_DATE
- Type: bool

@@ -857,6 +922,8 @@ date is used.

Size of listing chunk 100-1000, 0 to disable.

Properties:

- Config: list_chunk
- Env Var: RCLONE_DRIVE_LIST_CHUNK
- Type: int

@@ -866,15 +933,19 @@ Size of listing chunk 100-1000, 0 to disable.

Impersonate this user when using a service account.

Properties:

- Config: impersonate
- Env Var: RCLONE_DRIVE_IMPERSONATE
- Type: string
- Default: ""
- Required: false

#### --drive-upload-cutoff

Cutoff for switching to chunked upload.

Properties:

- Config: upload_cutoff
- Env Var: RCLONE_DRIVE_UPLOAD_CUTOFF
- Type: SizeSuffix

@@ -891,6 +962,8 @@ is buffered in memory one per transfer.

Reducing this will reduce memory usage but decrease performance.

Properties:

- Config: chunk_size
- Env Var: RCLONE_DRIVE_CHUNK_SIZE
- Type: SizeSuffix

@@ -906,6 +979,8 @@ as malware or spam and cannot be downloaded" with the error code
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.

Properties:

- Config: acknowledge_abuse
- Env Var: RCLONE_DRIVE_ACKNOWLEDGE_ABUSE
- Type: bool

@@ -915,6 +990,8 @@ will download it anyway.

Keep new head revision of each file forever.

Properties:

- Config: keep_revision_forever
- Env Var: RCLONE_DRIVE_KEEP_REVISION_FOREVER
- Type: bool

@@ -937,6 +1014,8 @@ doing rclone ls/lsl/lsf/lsjson/etc only.
If you do use this flag for syncing (not recommended) then you will
need to use --ignore-size also.

Properties:

- Config: size_as_quota
- Env Var: RCLONE_DRIVE_SIZE_AS_QUOTA
- Type: bool

@@ -946,6 +1025,8 @@ need to use --ignore-size also.

If Objects are greater, use drive v2 API to download.

Properties:

- Config: v2_download_min_size
- Env Var: RCLONE_DRIVE_V2_DOWNLOAD_MIN_SIZE
- Type: SizeSuffix

@@ -955,6 +1036,8 @@ If Objects are greater, use drive v2 API to download.

Minimum time to sleep between API calls.

Properties:

- Config: pacer_min_sleep
- Env Var: RCLONE_DRIVE_PACER_MIN_SLEEP
- Type: Duration

@@ -964,6 +1047,8 @@ Minimum time to sleep between API calls.

Number of API calls to allow without sleeping.

Properties:

- Config: pacer_burst
- Env Var: RCLONE_DRIVE_PACER_BURST
- Type: int

@@ -978,6 +1063,8 @@ different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.

Properties:

- Config: server_side_across_configs
- Env Var: RCLONE_DRIVE_SERVER_SIDE_ACROSS_CONFIGS
- Type: bool

@@ -996,6 +1083,8 @@ See: https://github.com/rclone/rclone/issues/3631

Properties:

- Config: disable_http2
- Env Var: RCLONE_DRIVE_DISABLE_HTTP2
- Type: bool

@@ -1017,6 +1106,8 @@ Google don't document so it may break in the future.
See: https://github.com/rclone/rclone/issues/3857

Properties:

- Config: stop_on_upload_limit
- Env Var: RCLONE_DRIVE_STOP_ON_UPLOAD_LIMIT
- Type: bool

@@ -1036,6 +1127,8 @@ Note that this detection is relying on error message strings which
Google don't document so it may break in the future.

Properties:

- Config: stop_on_download_limit
- Env Var: RCLONE_DRIVE_STOP_ON_DOWNLOAD_LIMIT
- Type: bool

@@ -1050,17 +1143,35 @@ they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.

Properties:

- Config: skip_shortcuts
- Env Var: RCLONE_DRIVE_SKIP_SHORTCUTS
- Type: bool
- Default: false

#### --drive-skip-dangling-shortcuts

If set skip dangling shortcut files.

If this is set then rclone will not show any dangling shortcuts in listings.

Properties:

- Config: skip_dangling_shortcuts
- Env Var: RCLONE_DRIVE_SKIP_DANGLING_SHORTCUTS
- Type: bool
- Default: false

#### --drive-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_DRIVE_ENCODING
- Type: MultiEncoder
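
Both shortcut options accept the flag form or the environment variable shown in their Properties; a sketch with an assumed `gdrive:` remote:

```
# Omit all shortcut files from listings
rclone lsf gdrive: --drive-skip-shortcuts

# Hide only dangling shortcuts, via the environment variable
RCLONE_DRIVE_SKIP_DANGLING_SHORTCUTS=true rclone lsf gdrive:
```
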
@@ -1080,7 +1191,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).
[backend/command](/rc/#backend-command).
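
A sketch of the two invocation styles for backend commands such as the `get` command below (the `gdrive:` remote is an assumption):

```
# Directly from the command line
rclone backend get gdrive: -o chunk_size

# Against a running rclone via the rc interface
rclone rc backend/command command=get fs=gdrive:
```
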
### get

@@ -1264,8 +1375,11 @@ and upload the files if you prefer.

### Limitations of Google Docs

Google docs will appear as size -1 in `rclone ls` and as size 0 in
anything which uses the VFS layer, e.g. `rclone mount`, `rclone serve`.
Google docs will appear as size -1 in `rclone ls`, `rclone ncdu` etc,
and as size 0 in anything which uses the VFS layer, e.g. `rclone mount`
and `rclone serve`. When calculating directory totals, e.g. in
`rclone size` and `rclone ncdu`, they will be counted in as empty
files.

This is because rclone can't find out the size of the Google docs
without downloading them.

@@ -190,10 +190,12 @@ OAuth Client Id.

Leave blank normally.

Properties:

- Config: client_id
- Env Var: RCLONE_DROPBOX_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --dropbox-client-secret

@@ -201,10 +203,12 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_DROPBOX_CLIENT_SECRET
- Type: string
- Default: ""
- Required: false

### Advanced options

@@ -214,10 +218,12 @@ Here are the advanced options specific to dropbox (Dropbox).

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_DROPBOX_TOKEN
- Type: string
- Default: ""
- Required: false

#### --dropbox-auth-url

@@ -225,10 +231,12 @@ Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_DROPBOX_AUTH_URL
- Type: string
- Default: ""
- Required: false

#### --dropbox-token-url

@@ -236,10 +244,12 @@ Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_DROPBOX_TOKEN_URL
- Type: string
- Default: ""
- Required: false

#### --dropbox-chunk-size

@@ -252,6 +262,8 @@ deal with retries. Setting this larger will increase the speed
slightly (at most 10% for 128 MiB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.

Properties:

- Config: chunk_size
- Env Var: RCLONE_DROPBOX_CHUNK_SIZE
- Type: SizeSuffix

@@ -276,10 +288,12 @@ permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.

Properties:

- Config: impersonate
- Env Var: RCLONE_DROPBOX_IMPERSONATE
- Type: string
- Default: ""
- Required: false

#### --dropbox-shared-files

@@ -289,6 +303,8 @@ In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported in this mode.
All other operations will be disabled.

Properties:

- Config: shared_files
- Env Var: RCLONE_DROPBOX_SHARED_FILES
- Type: bool

@@ -309,6 +325,8 @@ Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders can be omitted after the first use of a particular
shared folder.

Properties:

- Config: shared_folders
- Env Var: RCLONE_DROPBOX_SHARED_FOLDERS
- Type: bool

@@ -332,6 +350,8 @@ Rclone will close any outstanding batches when it exits which may make
a delay on quit.

Properties:

- Config: batch_mode
- Env Var: RCLONE_DROPBOX_BATCH_MODE
- Type: string

@@ -358,6 +378,8 @@ as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.

Properties:

- Config: batch_size
- Env Var: RCLONE_DROPBOX_BATCH_SIZE
- Type: int

@@ -378,6 +400,8 @@ default based on the batch_mode in use.
- batch_mode: off - not in use

Properties:

- Config: batch_timeout
- Env Var: RCLONE_DROPBOX_BATCH_TIMEOUT
- Type: Duration

@@ -387,6 +411,8 @@ default based on the batch_mode in use.

Max time to wait for a batch to finish committing

Properties:

- Config: batch_commit_timeout
- Env Var: RCLONE_DROPBOX_BATCH_COMMIT_TIMEOUT
- Type: Duration
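
Tying the batch options together, an upload of many small files might be tuned roughly like this (the remote name and values are illustrative; `sync` and `async` are the batch modes besides `off`):

```
rclone copy ./photos dropbox:photos \
    --dropbox-batch-mode async \
    --dropbox-batch-size 100 \
    --transfers 32
```
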
@@ -394,10 +420,12 @@ Max time to wait for a batch to finish committing

#### --dropbox-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_DROPBOX_ENCODING
- Type: MultiEncoder

@@ -122,10 +122,12 @@ Here are the standard options specific to fichier (1Fichier).

Your API Key, get it from https://1fichier.com/console/params.pl.

Properties:

- Config: api_key
- Env Var: RCLONE_FICHIER_API_KEY
- Type: string
- Default: ""
- Required: false

### Advanced options

@@ -135,10 +137,12 @@ Here are the advanced options specific to fichier (1Fichier).

If you want to download a shared folder, add this parameter.

Properties:

- Config: shared_folder
- Env Var: RCLONE_FICHIER_SHARED_FOLDER
- Type: string
- Default: ""
- Required: false

#### --fichier-file-password

@@ -146,10 +150,12 @@ If you want to download a shared file that is password protected, add this param

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: file_password
- Env Var: RCLONE_FICHIER_FILE_PASSWORD
- Type: string
- Default: ""
- Required: false

#### --fichier-folder-password

@@ -157,17 +163,21 @@ If you want to list the files in a shared folder that is password protected, add

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: folder_password
- Env Var: RCLONE_FICHIER_FOLDER_PASSWORD
- Type: string
- Default: ""
- Required: false

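Since both password options take obscured input, a value would be prepared with `rclone obscure` first; a sketch (the remote name `myfichier` and the password are placeholders):

```
rclone config update myfichier file_password "$(rclone obscure 'file-secret')"
```
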
#### --fichier-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_FICHIER_ENCODING
- Type: MultiEncoder

@@ -160,10 +160,12 @@ Here are the standard options specific to filefabric (Enterprise File Fabric).

URL of the Enterprise File Fabric to connect to.

Properties:

- Config: url
- Env Var: RCLONE_FILEFABRIC_URL
- Type: string
- Default: ""
- Required: true
- Examples:
    - "https://storagemadeeasy.com"
        - Storage Made Easy US

@@ -181,10 +183,12 @@ Leave blank normally.
Fill in to make rclone start with directory of a given ID.

Properties:

- Config: root_folder_id
- Env Var: RCLONE_FILEFABRIC_ROOT_FOLDER_ID
- Type: string
- Default: ""
- Required: false

#### --filefabric-permanent-token

@@ -200,10 +204,12 @@ These tokens are normally valid for several years.
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens

Properties:

- Config: permanent_token
- Env Var: RCLONE_FILEFABRIC_PERMANENT_TOKEN
- Type: string
- Default: ""
- Required: false

### Advanced options

@@ -219,10 +225,12 @@ usually valid for 1 hour.
Don't set this value - rclone will set it automatically.

Properties:

- Config: token
- Env Var: RCLONE_FILEFABRIC_TOKEN
- Type: string
- Default: ""
- Required: false

#### --filefabric-token-expiry

@@ -231,10 +239,12 @@ Token expiry time.
Don't set this value - rclone will set it automatically.

Properties:

- Config: token_expiry
- Env Var: RCLONE_FILEFABRIC_TOKEN_EXPIRY
- Type: string
- Default: ""
- Required: false

#### --filefabric-version

@@ -243,17 +253,21 @@ Version read from the file fabric.
Don't set this value - rclone will set it automatically.

Properties:

- Config: version
- Env Var: RCLONE_FILEFABRIC_VERSION
- Type: string
- Default: ""
- Required: false

#### --filefabric-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_FILEFABRIC_ENCODING
- Type: MultiEncoder

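A File Fabric remote using the options above could be created non-interactively roughly as follows (the remote name and token are placeholders; the URL is one of the documented examples):

```
rclone config create mysme filefabric \
    url https://storagemadeeasy.com \
    permanent_token XXXXXXXXXXXXXXXX
```
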
@@ -157,7 +157,7 @@ These flags are available for every command.
--use-json-log Use json log format
--use-mmap Use mmap allocator (see docs)
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string (default "rclone/v1.57.0")
--user-agent string Set the user-agent to a specified string (default "rclone/v1.58.0")
-v, --verbose count Print lots more stuff (repeat for more)
```

@@ -170,7 +170,7 @@ and may be set in the config file.
--acd-auth-url string Auth server URL
--acd-client-id string OAuth Client Id
--acd-client-secret string OAuth Client Secret
--acd-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
--acd-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
--acd-token string OAuth Access Token as a JSON blob
--acd-token-url string Token server url
@@ -179,9 +179,9 @@ and may be set in the config file.
--azureblob-access-tier string Access tier of blob: hot, cool or archive
--azureblob-account string Storage Account Name
--azureblob-archive-tier-delete Delete archive tier blobs before overwriting
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100 MiB) (default 4Mi)
--azureblob-chunk-size SizeSuffix Upload chunk size (default 4Mi)
--azureblob-disable-checksum Don't store MD5 checksum with object metadata
--azureblob-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
--azureblob-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
--azureblob-endpoint string Endpoint for the service
--azureblob-key string Storage Account Key
--azureblob-list-chunk int Size of blob list (default 5000)
@@ -194,6 +194,7 @@ and may be set in the config file.
--azureblob-public-access string Public access level of a container: blob or container
--azureblob-sas-url string SAS URL for container level access only
--azureblob-service-principal-file string Path to file containing credentials for use with a service principal
--azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
--azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
--azureblob-use-emulator Uses local storage emulator if provided as 'true'
--azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
@@ -203,7 +204,7 @@ and may be set in the config file.
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
--b2-download-url string Custom endpoint for downloads
--b2-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--b2-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--b2-endpoint string Endpoint for the service
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files
--b2-key string Application Key
@@ -219,7 +220,7 @@ and may be set in the config file.
--box-client-id string OAuth Client Id
--box-client-secret string OAuth Client Secret
--box-commit-retries int Max number of times to try committing a multipart file (default 100)
--box-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
--box-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
--box-list-chunk int Size of listing chunk 1-1000 (default 1000)
--box-owned-by string Only show items owned by the login (email address) passed in
--box-root-folder-id string Fill in for rclone to use a non root folder as its starting point
@@ -256,6 +257,7 @@ and may be set in the config file.
--compress-remote string Remote to compress
-L, --copy-links Follow symlinks and copy the pointed to item
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
--crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
--crypt-filename-encryption string How to encrypt the filenames (default "standard")
--crypt-no-data-encryption Option to either encrypt file data or leave it unencrypted
--crypt-password string Password or pass phrase for encryption (obscured)
@@ -270,8 +272,9 @@ and may be set in the config file.
--drive-chunk-size SizeSuffix Upload chunk size (default 8Mi)
--drive-client-id string Google Application Client Id
--drive-client-secret string OAuth Client Secret
--drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
--drive-disable-http2 Disable drive using http2 (default true)
--drive-encoding MultiEncoder This sets the encoding for the backend (default InvalidUtf8)
--drive-encoding MultiEncoder The encoding for the backend (default InvalidUtf8)
--drive-export-formats string Comma separated list of preferred formats for downloading Google docs (default "docx,xlsx,pptx,svg")
--drive-formats string Deprecated: See export_formats
--drive-impersonate string Impersonate this user when using a service account
@@ -288,6 +291,7 @@ and may be set in the config file.
--drive-shared-with-me Only show files that are shared with me
--drive-size-as-quota Show sizes as storage quota usage, not actual size
--drive-skip-checksum-gphotos Skip MD5 checksum on Google photos and videos only
--drive-skip-dangling-shortcuts If set skip dangling shortcut files
--drive-skip-gdocs Skip google documents in all listings
--drive-skip-shortcuts If set skip shortcut files
--drive-starred-only Only show files that are starred
@@ -310,40 +314,41 @@ and may be set in the config file.
--dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
--dropbox-client-id string OAuth Client Id
--dropbox-client-secret string OAuth Client Secret
--dropbox-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
--dropbox-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
--dropbox-impersonate string Impersonate this user when using a business account
--dropbox-shared-files Instructs rclone to work on individual shared files
--dropbox-shared-folders Instructs rclone to work on shared folders
--dropbox-token string OAuth Access Token as a JSON blob
--dropbox-token-url string Token server url
--fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
--fichier-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
--fichier-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
--fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
--fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
--fichier-shared-folder string If you want to download a shared folder, add this parameter
--filefabric-encoding MultiEncoder This sets the encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--filefabric-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--filefabric-permanent-token string Permanent Authentication Token
--filefabric-root-folder-id string ID of the root folder
--filefabric-token string Session Token
--filefabric-token-expiry string Token expiry time
--filefabric-url string URL of the Enterprise File Fabric to connect to
--filefabric-version string Version read from the file fabric
--ftp-ask-password Allow asking for FTP password when needed
--ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
--ftp-disable-epsv Disable using EPSV even if server advertises support
--ftp-disable-mlsd Disable using MLSD even if server advertises support
--ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
--ftp-encoding MultiEncoder This sets the encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
--ftp-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,RightSpace,Dot)
--ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
--ftp-host string FTP host to connect to
--ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
--ftp-no-check-certificate Do not verify the TLS certificate of the server
--ftp-pass string FTP password (obscured)
--ftp-port string FTP port number (default 21)
--ftp-port int FTP port number (default 21)
--ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
--ftp-tls Use Implicit FTPS (FTP over TLS)
--ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
--ftp-user string FTP username, leave blank for current username, $USER
--ftp-user string FTP username (default "$USER")
--ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
--gcs-anonymous Access public buckets and objects without credentials
--gcs-auth-url string Auth server URL
@@ -351,7 +356,7 @@ and may be set in the config file.
--gcs-bucket-policy-only Access checks should use bucket-level IAM policies
--gcs-client-id string OAuth Client Id
--gcs-client-secret string OAuth Client Secret
--gcs-encoding MultiEncoder This sets the encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gcs-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gcs-location string Location for the newly created buckets
--gcs-object-acl string Access Control List for new objects
--gcs-project-number string Project number
@@ -362,7 +367,7 @@ and may be set in the config file.
--gphotos-auth-url string Auth server URL
--gphotos-client-id string OAuth Client Id
--gphotos-client-secret string OAuth Client Secret
--gphotos-encoding MultiEncoder This sets the encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gphotos-encoding MultiEncoder The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gphotos-include-archived Also view and download archived media
--gphotos-read-only Set to make the Google Photos backend read only
--gphotos-read-size Set to read the size of media items
@@ -374,38 +379,39 @@ and may be set in the config file.
--hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
--hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
--hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
--hdfs-encoding MultiEncoder This sets the encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
--hdfs-encoding MultiEncoder The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
--hdfs-namenode string Hadoop name node and port
--hdfs-service-principal-name string Kerberos service principal name for the namenode
--hdfs-username string Hadoop user name
--http-headers CommaSepList Set HTTP headers for all transactions
--http-no-head Don't use HEAD requests to find file sizes in dir listing
--http-no-head Don't use HEAD requests
--http-no-slash Set this if the site doesn't end directories with /
--http-url string URL of http host to connect to
--hubic-auth-url string Auth server URL
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
--hubic-client-id string OAuth Client Id
--hubic-client-secret string OAuth Client Secret
--hubic-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8)
--hubic-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
--hubic-no-chunk Don't chunk files during streaming upload
--hubic-token string OAuth Access Token as a JSON blob
--hubic-token-url string Token server url
--jottacloud-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
--jottacloud-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
--jottacloud-no-versions Avoid server side versioning by deleting files and recreating files instead of overwriting them
--jottacloud-trashed-only Only show files that are in the trash
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
--koofr-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--koofr-endpoint string The Koofr API endpoint to use (default "https://app.koofr.net")
--koofr-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--koofr-endpoint string The Koofr API endpoint to use
--koofr-mountid string Mount ID of the mount to use
--koofr-password string Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
--koofr-password string Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password) (obscured)
--koofr-provider string Choose your storage provider
--koofr-setmtime Does the backend support setting modification time (default true)
--koofr-user string Your Koofr user name
--koofr-user string Your user name
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-case-insensitive Force the filesystem to report itself as case insensitive
--local-case-sensitive Force the filesystem to report itself as case sensitive
--local-encoding MultiEncoder This sets the encoding for the backend (default Slash,Dot)
--local-encoding MultiEncoder The encoding for the backend (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload
--local-no-preallocate Disable preallocation of disk space for transferred files
--local-no-set-modtime Disable setting modtime
@@ -414,7 +420,7 @@ and may be set in the config file.
--local-unicode-normalization Apply unicode NFC normalization to paths and filenames
--local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
--mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
--mailru-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--mailru-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--mailru-pass string Password (obscured)
--mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
--mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash) (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
@@ -422,18 +428,23 @@ and may be set in the config file.
--mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk (default 32Mi)
--mailru-user string User name (usually email)
--mega-debug Output more debug from Mega
--mega-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
--mega-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
--mega-hard-delete Delete files permanently rather than putting them into the trash
--mega-pass string Password (obscured)
--mega-user string User name
--netstorage-account string Set the NetStorage account name
--netstorage-host string Domain+path of NetStorage host to connect to
--netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
--netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only)
--onedrive-auth-url string Auth server URL
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes) (default 10Mi)
--onedrive-client-id string OAuth Client Id
--onedrive-client-secret string OAuth Client Secret
--onedrive-disable-site-permission Disable the request for Sites.Read.All permission
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
--onedrive-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
--onedrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
--onedrive-expose-onenote-files Set to make OneNote files show up in directory listings
--onedrive-link-password string Set the password for links created by the link command
--onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
@@ -441,27 +452,28 @@ and may be set in the config file.
--onedrive-list-chunk int Size of listing chunk (default 1000)
--onedrive-no-versions Remove all versions on modifying operations
--onedrive-region string Choose national cloud region for OneDrive (default "global")
--onedrive-root-folder-id string ID of the root folder
--onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs
--onedrive-token string OAuth Access Token as a JSON blob
--onedrive-token-url string Token server url
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
--opendrive-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--opendrive-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--opendrive-password string Password (obscured)
--opendrive-username string Username
--pcloud-auth-url string Auth server URL
--pcloud-client-id string OAuth Client Id
--pcloud-client-secret string OAuth Client Secret
--pcloud-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--pcloud-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
--pcloud-root-folder-id string Fill in for rclone to use a non root folder as its starting point (default "d0")
--pcloud-token string OAuth Access Token as a JSON blob
--pcloud-token-url string Token server url
--premiumizeme-encoding MultiEncoder This sets the encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--putio-encoding MultiEncoder This sets the encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--premiumizeme-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--putio-encoding MultiEncoder The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
--qingstor-connection-retries int Number of connection retries (default 3)
--qingstor-encoding MultiEncoder This sets the encoding for the backend (default Slash,Ctl,InvalidUtf8)
--qingstor-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8)
--qingstor-endpoint string Enter an endpoint URL to connect to QingStor API
--qingstor-env-auth Get QingStor credentials from runtime
--qingstor-secret-access-key string QingStor Secret Access Key (password)
@@ -476,12 +488,14 @@ and may be set in the config file.
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-disable-http2 Disable usage of http2 for S3 backends
--s3-download-url string Custom endpoint for downloads
--s3-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8,Dot)
--s3-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8,Dot)
--s3-endpoint string Endpoint for S3 API
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
--s3-force-path-style If true use path style access if false use virtual hosted style (default true)
--s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
--s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
--s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
--s3-list-version int Version of ListObjects to use: 1,2 or 0 for auto
--s3-location-constraint string Location constraint - must be set to match the Region
--s3-max-upload-parts int Maximum number of parts in a multipart upload (default 10000)
--s3-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed (default 1m0s)
@@ -505,10 +519,11 @@ and may be set in the config file.
--s3-upload-concurrency int Concurrency for multipart uploads (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
--s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
--s3-v2-auth If true use v2 authentication
--seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
--seafile-create-library Should rclone create a library if it doesn't exist
--seafile-encoding MultiEncoder This sets the encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
--seafile-encoding MultiEncoder The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
--seafile-library string Name of the library
--seafile-library-key string Library password (for encrypted libraries only) (obscured)
--seafile-pass string Password (obscured)
@@ -528,7 +543,7 @@ and may be set in the config file.
--sftp-md5sum-command string The command used to read md5 hashes
--sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
--sftp-path-override string Override path used by SSH connection
--sftp-port string SSH port number (default 22)
--sftp-port int SSH port number (default 22)
--sftp-pubkey-file string Optional path to public key file
--sftp-server-command string Specifies the path or command to run a sftp server on the remote host
--sftp-set-modtime Set the modified time on the remote if set (default true)
@@ -537,23 +552,28 @@ and may be set in the config file.
--sftp-subsystem string Specifies the SSH2 subsystem on the remote host (default "sftp")
--sftp-use-fstat If set use fstat instead of stat
--sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods
--sftp-user string SSH username, leave blank for current username, $USER
--sftp-user string SSH username (default "$USER")
--sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
--sharefile-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
--sharefile-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
--sharefile-endpoint string Endpoint for API calls
--sharefile-root-folder-id string ID of the root folder
--sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
--sia-api-password string Sia Daemon API Password (obscured)
--sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
--sia-encoding MultiEncoder This sets the encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
--sia-encoding MultiEncoder The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
--sia-user-agent string Siad User Agent (default "Sia-Agent")
--skip-links Don't warn about skipped symlinks
--storj-access-grant string Access grant
--storj-api-key string API key
--storj-passphrase string Encryption passphrase
--storj-provider string Choose an authentication method (default "existing")
--storj-satellite-address string Satellite address (default "us-central-1.storj.io")
--sugarsync-access-key-id string Sugarsync Access Key ID
--sugarsync-app-id string Sugarsync App ID
--sugarsync-authorization string Sugarsync authorization
--sugarsync-authorization-expiry string Sugarsync authorization expiry
--sugarsync-deleted-id string Sugarsync deleted folder id
--sugarsync-encoding MultiEncoder This sets the encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
--sugarsync-encoding MultiEncoder The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
--sugarsync-hard-delete Permanently delete files if true
--sugarsync-private-access-key string Sugarsync Private Access Key
--sugarsync-refresh-token string Sugarsync refresh token
@@ -567,7 +587,7 @@ and may be set in the config file.
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-encoding MultiEncoder This sets the encoding for the backend (default Slash,InvalidUtf8)
--swift-encoding MultiEncoder The encoding for the backend (default Slash,InvalidUtf8)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
--swift-env-auth Get swift credentials from environment variables in standard OpenStack form
--swift-key string API key or password (OS_PASSWORD)
@@ -581,21 +601,16 @@ and may be set in the config file.
--swift-tenant-id string Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
--swift-user string User name to log in (OS_USERNAME)
--swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID)
--tardigrade-access-grant string Access grant
--tardigrade-api-key string API key
--tardigrade-passphrase string Encryption passphrase
--tardigrade-provider string Choose an authentication method (default "existing")
--tardigrade-satellite-address string Satellite address (default "us-central-1.tardigrade.io")
--union-action-policy string Policy to choose upstream on ACTION category (default "epall")
--union-cache-time int Cache time of usage and free space (in seconds) (default 120)
--union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
--union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
--union-upstreams string List of space separated upstreams
--uptobox-access-token string Your access token
--uptobox-encoding MultiEncoder This sets the encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
--uptobox-encoding MultiEncoder The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token
--webdav-encoding string This sets the encoding for the backend
--webdav-encoding string The encoding for the backend
--webdav-headers CommaSepList Set HTTP headers for all transactions
--webdav-pass string Password (obscured)
--webdav-url string URL of http host to connect to
@@ -604,13 +619,14 @@ and may be set in the config file.
--yandex-auth-url string Auth server URL
--yandex-client-id string OAuth Client Id
--yandex-client-secret string OAuth Client Secret
--yandex-encoding MultiEncoder This sets the encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-encoding MultiEncoder The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-hard-delete Delete files permanently rather than putting them into the trash
--yandex-token string OAuth Access Token as a JSON blob
--yandex-token-url string Token server url
--zoho-auth-url string Auth server URL
--zoho-client-id string OAuth Client Id
--zoho-client-secret string OAuth Client Secret
--zoho-encoding MultiEncoder This sets the encoding for the backend (default Del,Ctl,InvalidUtf8)
--zoho-encoding MultiEncoder The encoding for the backend (default Del,Ctl,InvalidUtf8)
--zoho-region string Zoho region to connect to
--zoho-token string OAuth Access Token as a JSON blob
--zoho-token-url string Token server url
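
Every backend flag in the list above can equally be set through the environment variable named in its Properties section, e.g. `--b2-hard-delete` maps to `RCLONE_B2_HARD_DELETE`. A sketch with an assumed `b2:` remote:

```
# Flag form and environment-variable form are equivalent
rclone delete b2:bucket/old --b2-hard-delete
RCLONE_B2_HARD_DELETE=true rclone delete b2:bucket/old
```
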
@@ -146,28 +146,34 @@ FTP host to connect to.

E.g. "ftp.example.com".

Properties:

- Config: host
- Env Var: RCLONE_FTP_HOST
- Type: string
- Default: ""
- Required: true

#### --ftp-user

FTP username, leave blank for current username, $USER.
FTP username.

Properties:

- Config: user
- Env Var: RCLONE_FTP_USER
- Type: string
- Default: ""
- Default: "$USER"

#### --ftp-port

FTP port, leave blank to use default (21).
FTP port number.

Properties:

- Config: port
- Env Var: RCLONE_FTP_PORT
- Type: string
- Default: ""
- Type: int
- Default: 21

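Putting the three options above together (the host is the documented example; the username and port are illustrative):

```
rclone lsd :ftp: --ftp-host ftp.example.com --ftp-user alice --ftp-port 2121
```
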
#### --ftp-pass

@@ -175,19 +181,12 @@ FTP password.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: pass
- Env Var: RCLONE_FTP_PASS
- Type: string
- Default: ""

#### --ftp-ask-password

Ask for password when connecting to an FTP server and no password is configured.

- Config: ask_password
- Env Var: RCLONE_FTP_ASK_PASSWORD
- Type: bool
- Default: false
- Required: false

#### --ftp-tls

@@ -198,6 +197,8 @@ right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTP.

Properties:

- Config: tls
- Env Var: RCLONE_FTP_TLS
- Type: bool

@@ -211,6 +212,8 @@ When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.

Properties:

- Config: explicit_tls
- Env Var: RCLONE_FTP_EXPLICIT_TLS
- Type: bool

@@ -224,6 +227,8 @@ Here are the advanced options specific to ftp (FTP Connection).

Maximum number of FTP simultaneous connections, 0 for unlimited.

Properties:

- Config: concurrency
- Env Var: RCLONE_FTP_CONCURRENCY
- Type: int

@@ -233,6 +238,8 @@ Maximum number of FTP simultaneous connections, 0 for unlimited.

Do not verify the TLS certificate of the server.

Properties:

- Config: no_check_certificate
- Env Var: RCLONE_FTP_NO_CHECK_CERTIFICATE
- Type: bool

@@ -242,6 +249,8 @@ Do not verify the TLS certificate of the server.

Disable using EPSV even if server advertises support.

Properties:

- Config: disable_epsv
- Env Var: RCLONE_FTP_DISABLE_EPSV
- Type: bool

@@ -251,6 +260,8 @@ Disable using EPSV even if server advertises support.

Disable using MLSD even if server advertises support.

Properties:

- Config: disable_mlsd
- Env Var: RCLONE_FTP_DISABLE_MLSD
- Type: bool

@@ -260,6 +271,8 @@ Disable using MLSD even if server advertises support.

Use MDTM to set modification time (VsFtpd quirk)

Properties:

- Config: writing_mdtm
- Env Var: RCLONE_FTP_WRITING_MDTM
- Type: bool

@@ -275,6 +288,8 @@ given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.

Properties:

- Config: idle_timeout
- Env Var: RCLONE_FTP_IDLE_TIMEOUT
- Type: Duration

@@ -284,6 +299,8 @@ Set to 0 to keep connections indefinitely.

Maximum time to wait for a response to close.

Properties:

- Config: close_timeout
- Env Var: RCLONE_FTP_CLOSE_TIMEOUT
- Type: Duration

@@ -297,6 +314,8 @@ TLS cache allows resuming TLS sessions and reusing PSK between connections.
Increase if default size is not enough resulting in TLS resumption errors.
Enabled by default. Use 0 to disable.

Properties:

- Config: tls_cache_size
- Env Var: RCLONE_FTP_TLS_CACHE_SIZE
- Type: int

@@ -306,6 +325,8 @@ Enabled by default. Use 0 to disable.

Disable TLS 1.3 (workaround for FTP servers with buggy TLS)

Properties:

- Config: disable_tls13
- Env Var: RCLONE_FTP_DISABLE_TLS13
- Type: bool

@@ -315,17 +336,35 @@ Disable TLS 1.3 (workaround for FTP servers with buggy TLS)

Maximum time to wait for data connection closing status.

Properties:

- Config: shut_timeout
- Env Var: RCLONE_FTP_SHUT_TIMEOUT
- Type: Duration
- Default: 1m0s

#### --ftp-ask-password

Allow asking for FTP password when needed.

If this is set and no password is supplied then rclone will ask for a password.

Properties:

- Config: ask_password
- Env Var: RCLONE_FTP_ASK_PASSWORD
- Type: bool
- Default: false

#### --ftp-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_FTP_ENCODING
- Type: MultiEncoder

@@ -281,10 +281,12 @@ OAuth Client Id.

Leave blank normally.

Properties:

- Config: client_id
- Env Var: RCLONE_GCS_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --gcs-client-secret

@@ -292,10 +294,12 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_GCS_CLIENT_SECRET
- Type: string
- Default: ""
- Required: false

#### --gcs-project-number

@@ -303,10 +307,12 @@ Project number.

Optional - needed only for list/create/delete buckets - see your developer console.

Properties:

- Config: project_number
- Env Var: RCLONE_GCS_PROJECT_NUMBER
- Type: string
- Default: ""
- Required: false

#### --gcs-service-account-file

@@ -317,10 +323,12 @@ Needed only if you want to use SA instead of interactive login.

Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.

Properties:

- Config: service_account_file
- Env Var: RCLONE_GCS_SERVICE_ACCOUNT_FILE
- Type: string
- Default: ""
- Required: false

#### --gcs-service-account-credentials

@@ -329,10 +337,12 @@ Service Account Credentials JSON blob.
Leave blank normally.
Needed only if you want to use SA instead of interactive login.

Properties:

- Config: service_account_credentials
- Env Var: RCLONE_GCS_SERVICE_ACCOUNT_CREDENTIALS
- Type: string
- Default: ""
- Required: false

#### --gcs-anonymous

@@ -340,6 +350,8 @@ Access public buckets and objects without credentials.

Set to 'true' if you just want to download files and don't configure credentials.

Properties:

- Config: anonymous
- Env Var: RCLONE_GCS_ANONYMOUS
- Type: bool
@@ -349,10 +361,12 @@ Set to 'true' if you just want to download files and don't configure credentials
|
||||
|
||||
Access Control List for new objects.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: object_acl
|
||||
- Env Var: RCLONE_GCS_OBJECT_ACL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "authenticatedRead"
|
||||
- Object owner gets OWNER access.
|
||||
@@ -377,10 +391,12 @@ Access Control List for new objects.
|
||||
|
||||
Access Control List for new buckets.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: bucket_acl
|
||||
- Env Var: RCLONE_GCS_BUCKET_ACL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "authenticatedRead"
|
||||
- Project team owners get OWNER access.
|
||||
@@ -413,6 +429,8 @@ When it is set, rclone:
|
||||
Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: bucket_policy_only
|
||||
- Env Var: RCLONE_GCS_BUCKET_POLICY_ONLY
|
||||
- Type: bool
|
||||
@@ -422,10 +440,12 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
|
||||
Location for the newly created buckets.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: location
|
||||
- Env Var: RCLONE_GCS_LOCATION
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
- Examples:
|
||||
- ""
|
||||
- Empty for default location (US)
|
||||
@@ -441,12 +461,22 @@ Location for the newly created buckets.
|
||||
- Hong Kong
|
||||
- "asia-northeast1"
|
||||
- Tokyo
|
||||
- "asia-northeast2"
|
||||
- Osaka
|
||||
- "asia-northeast3"
|
||||
- Seoul
|
||||
- "asia-south1"
|
||||
- Mumbai
|
||||
- "asia-south2"
|
||||
- Delhi
|
||||
- "asia-southeast1"
|
||||
- Singapore
|
||||
- "asia-southeast2"
|
||||
- Jakarta
|
||||
- "australia-southeast1"
|
||||
- Sydney
|
||||
- "australia-southeast2"
|
||||
- Melbourne
|
||||
- "europe-north1"
|
||||
- Finland
|
||||
- "europe-west1"
|
||||
@@ -457,6 +487,10 @@ Location for the newly created buckets.
|
||||
- Frankfurt
|
||||
- "europe-west4"
|
||||
- Netherlands
|
||||
- "europe-west6"
|
||||
- Zürich
|
||||
- "europe-central2"
|
||||
- Warsaw
|
||||
- "us-central1"
|
||||
- Iowa
|
||||
- "us-east1"
|
||||
@@ -467,15 +501,35 @@ Location for the newly created buckets.
|
||||
- Oregon
|
||||
- "us-west2"
|
||||
- California
|
||||
- "us-west3"
|
||||
- Salt Lake City
|
||||
- "us-west4"
|
||||
- Las Vegas
|
||||
- "northamerica-northeast1"
|
||||
- Montréal
|
||||
- "northamerica-northeast2"
|
||||
- Toronto
|
||||
- "southamerica-east1"
|
||||
- São Paulo
|
||||
- "southamerica-west1"
|
||||
- Santiago
|
||||
- "asia1"
|
||||
- Dual region: asia-northeast1 and asia-northeast2.
|
||||
- "eur4"
|
||||
- Dual region: europe-north1 and europe-west4.
|
||||
- "nam4"
|
||||
- Dual region: us-central1 and us-east1.
|
||||
|
||||
#### --gcs-storage-class
|
||||
|
||||
The storage class to use when storing objects in Google Cloud Storage.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: storage_class
|
||||
- Env Var: RCLONE_GCS_STORAGE_CLASS
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
- Examples:
|
||||
- ""
|
||||
- Default
|
||||
@@ -500,10 +554,12 @@ Here are the advanced options specific to google cloud storage (Google Cloud Sto
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_GCS_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --gcs-auth-url
|
||||
|
||||
@@ -511,10 +567,12 @@ Auth server URL.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_GCS_AUTH_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --gcs-token-url
|
||||
|
||||
@@ -522,17 +580,21 @@ Token server url.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_GCS_TOKEN_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --gcs-encoding
|
||||
|
||||
This sets the encoding for the backend.
|
||||
The encoding for the backend.
|
||||
|
||||
See the [encoding section in the overview](/overview/#encoding) for more info.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: encoding
|
||||
- Env Var: RCLONE_GCS_ENCODING
|
||||
- Type: MultiEncoder
|
||||
|
||||
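For public data the `anonymous` option above avoids credentials entirely. A hedged sketch (the remote and bucket names are hypothetical):

```
# Create a credential-free remote and download from a public bucket
rclone config create gcs-public "google cloud storage" anonymous true
rclone copy gcs-public:my-public-bucket/path /tmp/download
```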
@@ -232,10 +232,12 @@ OAuth Client Id.

Leave blank normally.

Properties:

- Config: client_id
- Env Var: RCLONE_GPHOTOS_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --gphotos-client-secret

@@ -243,10 +245,12 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_GPHOTOS_CLIENT_SECRET
- Type: string
- Default: ""
- Required: false

#### --gphotos-read-only

@@ -255,6 +259,8 @@ Set to make the Google Photos backend read only.
If you choose read only then rclone will only request read only access
to your photos, otherwise rclone will request full access.

Properties:

- Config: read_only
- Env Var: RCLONE_GPHOTOS_READ_ONLY
- Type: bool

@@ -268,10 +274,12 @@ Here are the advanced options specific to google photos (Google Photos).

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_GPHOTOS_TOKEN
- Type: string
- Default: ""
- Required: false

#### --gphotos-auth-url

@@ -279,10 +287,12 @@ Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_GPHOTOS_AUTH_URL
- Type: string
- Default: ""
- Required: false

#### --gphotos-token-url

@@ -290,10 +300,12 @@ Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_GPHOTOS_TOKEN_URL
- Type: string
- Default: ""
- Required: false

#### --gphotos-read-size

@@ -305,6 +317,8 @@ rclone mount needs to know the size of files in advance of reading
them, so setting this flag when using rclone mount is recommended if
you want to read the media.

Properties:

- Config: read_size
- Env Var: RCLONE_GPHOTOS_READ_SIZE
- Type: bool

@@ -314,6 +328,8 @@ you want to read the media.

Year limits the photos to be downloaded to those which are uploaded after the given year.

Properties:

- Config: start_year
- Env Var: RCLONE_GPHOTOS_START_YEAR
- Type: int

@@ -323,7 +339,7 @@ Year limits the photos to be downloaded to those which are uploaded after the gi

Also view and download archived media.

By default rclone does not request archived media. Thus, when syncing,
By default, rclone does not request archived media. Thus, when syncing,
archived media is not visible in directory listings or transferred.

Note that media in albums is always visible and synced, no matter

@@ -335,6 +351,8 @@ listings and transferred.
Without this flag, archived media will not be visible in directory
listings and won't be transferred.

Properties:

- Config: include_archived
- Env Var: RCLONE_GPHOTOS_INCLUDE_ARCHIVED
- Type: bool

@@ -342,10 +360,12 @@ listings and won't be transferred.

#### --gphotos-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_GPHOTOS_ENCODING
- Type: MultiEncoder
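Putting `--gphotos-read-size` together with `rclone mount`, as the option text above recommends for reading media, a minimal sketch (remote name and mountpoint are hypothetical):

```
# Determine file sizes up front so mounted media can be read
rclone mount gphotos-remote:media /mnt/photos --gphotos-read-size
```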
@@ -178,15 +178,19 @@ Here are the standard options specific to hasher (Better checksums for other rem

Remote to cache checksums for (e.g. myRemote:path).

Properties:

- Config: remote
- Env Var: RCLONE_HASHER_REMOTE
- Type: string
- Default: ""
- Required: true

#### --hasher-hashes

Comma separated list of supported checksum types.

Properties:

- Config: hashes
- Env Var: RCLONE_HASHER_HASHES
- Type: CommaSepList

@@ -196,6 +200,8 @@ Comma separated list of supported checksum types.

Maximum time to keep checksums in cache (0 = no cache, off = cache forever).

Properties:

- Config: max_age
- Env Var: RCLONE_HASHER_MAX_AGE
- Type: Duration

@@ -209,6 +215,8 @@ Here are the advanced options specific to hasher (Better checksums for other rem

Auto-update checksum for files smaller than this size (disabled by default).

Properties:

- Config: auto_size
- Env Var: RCLONE_HASHER_AUTO_SIZE
- Type: SizeSuffix
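A hedged sketch of wiring these hasher options together (`myRemote:path` is the placeholder from the option text above):

```
# Wrap an existing remote and cache MD5/SHA-1 sums for a day
rclone config create hashed hasher remote myRemote:path hashes md5,sha1 max_age 24h
rclone hashsum MD5 hashed:
```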
@@ -228,7 +236,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).
[backend/command](/rc/#backend-command).

### drop
@@ -159,19 +159,23 @@ Hadoop name node and port.

E.g. "namenode:8020" to connect to host namenode at port 8020.

Properties:

- Config: namenode
- Env Var: RCLONE_HDFS_NAMENODE
- Type: string
- Default: ""
- Required: true

#### --hdfs-username

Hadoop user name.

Properties:

- Config: username
- Env Var: RCLONE_HDFS_USERNAME
- Type: string
- Default: ""
- Required: false
- Examples:
    - "root"
        - Connect to hdfs as root.

@@ -188,10 +192,12 @@ Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.

Properties:

- Config: service_principal_name
- Env Var: RCLONE_HDFS_SERVICE_PRINCIPAL_NAME
- Type: string
- Default: ""
- Required: false

#### --hdfs-data-transfer-protection

@@ -202,20 +208,24 @@ checks, and wire encryption is required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.

Properties:

- Config: data_transfer_protection
- Env Var: RCLONE_HDFS_DATA_TRANSFER_PROTECTION
- Type: string
- Default: ""
- Required: false
- Examples:
    - "privacy"
        - Ensure authentication, integrity and encryption enabled.

#### --hdfs-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_HDFS_ENCODING
- Type: MultiEncoder
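A hedged sketch combining the HDFS options above with Kerberos enabled (host and principal names follow the examples in the option text; the remote name is hypothetical):

```
rclone config create hdfs-remote hdfs \
    namenode namenode.hadoop.docker:8020 \
    service_principal_name hdfs/namenode.hadoop.docker \
    data_transfer_protection privacy
rclone ls hdfs-remote:/user/rclone
```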
@@ -134,10 +134,12 @@ URL of http host to connect to.

E.g. "https://example.com", or "https://user:pass@example.com" to use a username and password.

Properties:

- Config: url
- Env Var: RCLONE_HTTP_URL
- Type: string
- Default: ""
- Required: true

### Advanced options

@@ -152,10 +154,11 @@ Use this to set additional HTTP headers for all transactions.

The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.

For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.

You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.

Properties:

- Config: headers
- Env Var: RCLONE_HTTP_HEADERS

@@ -177,6 +180,8 @@ URLs from them rather than downloading them.
Note that this may cause rclone to confuse genuine HTML files with
directories.

Properties:

- Config: no_slash
- Env Var: RCLONE_HTTP_NO_SLASH
- Type: bool

@@ -184,8 +189,9 @@ directories.

#### --http-no-head

Don't use HEAD requests to find file sizes in dir listing.
Don't use HEAD requests.

HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:

@@ -194,12 +200,11 @@ directory listing to:
- check it really exists
- check to see if it is a directory

If you set this option, rclone will not do the HEAD request. This will mean

- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.

Properties:

- Config: no_head
- Env Var: RCLONE_HTTP_NO_HEAD
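The `url`, `headers` and `no_head` options above combine naturally with an on-the-fly remote. A minimal sketch (the URL and cookie value are hypothetical):

```
# List a slow static site without per-file HEAD requests, sending a cookie
rclone lsd --http-url https://example.com :http: \
    --http-headers "Cookie,name=value" --http-no-head
```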
@@ -117,10 +117,12 @@ OAuth Client Id.

Leave blank normally.

Properties:

- Config: client_id
- Env Var: RCLONE_HUBIC_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --hubic-client-secret

@@ -128,10 +130,12 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_HUBIC_CLIENT_SECRET
- Type: string
- Default: ""
- Required: false

### Advanced options

@@ -141,10 +145,12 @@ Here are the advanced options specific to hubic (Hubic).

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_HUBIC_TOKEN
- Type: string
- Default: ""
- Required: false

#### --hubic-auth-url

@@ -152,10 +158,12 @@ Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_HUBIC_AUTH_URL
- Type: string
- Default: ""
- Required: false

#### --hubic-token-url

@@ -163,10 +171,12 @@ Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_HUBIC_TOKEN_URL
- Type: string
- Default: ""
- Required: false

#### --hubic-chunk-size

@@ -175,6 +185,8 @@ Above this size files will be chunked into a _segments container.
Above this size files will be chunked into a _segments container. The
default for this is 5 GiB which is its maximum value.

Properties:

- Config: chunk_size
- Env Var: RCLONE_HUBIC_CHUNK_SIZE
- Type: SizeSuffix

@@ -193,6 +205,8 @@ files are easier to deal with and have an MD5SUM.
Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.

Properties:

- Config: no_chunk
- Env Var: RCLONE_HUBIC_NO_CHUNK
- Type: bool

@@ -200,10 +214,12 @@ copy operations.

#### --hubic-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_HUBIC_ENCODING
- Type: MultiEncoder
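A hedged sketch of the `no_chunk` trade-off described above (the remote name and container path are hypothetical; uploads must stay under the 5 GiB chunk size):

```
# Upload without chunking so objects keep a usable MD5SUM
rclone copy /data hubic-remote:default/backup --hubic-no-chunk
```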
@@ -190,7 +190,7 @@ kill %1

## Install from source ##

Make sure you have at least [Go](https://golang.org/) go1.15
Make sure you have at least [Go](https://golang.org/) go1.16
installed. [Download go](https://golang.org/dl/) if necessary. The
latest release is recommended. Then
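The build commands themselves fall outside this hunk; as a hedged sketch, a typical from-source build looks like:

```
git clone https://github.com/rclone/rclone.git
cd rclone
go build
./rclone version
```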
222 docs/content/internetarchive.md Normal file

@@ -0,0 +1,222 @@
---
title: "Internet Archive"
description: "Rclone docs for Internet Archive"
---

# {{< icon "fa fa-archive" >}} Internet Archive

The Internet Archive backend utilizes Items on [archive.org](https://archive.org/).

Refer to [IAS3 API documentation](https://archive.org/services/docs/api/ias3.html) for the API this backend uses.

Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command). You may put subdirectories in too, e.g. `remote:item/path/to/dir`.

Once you have made a remote (see the provider specific section above)
you can use it like this:

Unlike S3, listing all items uploaded by you isn't supported.

Make a new item

    rclone mkdir remote:item

List the contents of an item

    rclone ls remote:item

Sync `/home/local/directory` to the remote item, deleting any excess
files in the item.

    rclone sync -i /home/local/directory remote:item

## Notes
Because of Internet Archive's architecture, it enqueues write operations (and extra post-processing) in a per-item queue. You can check an item's queue at https://catalogd.archive.org/history/item-name-here . Because of that, uploads and deletes will not show up immediately and take some time to become available.
The per-item queue feeds into another queue, the Item Deriver Queue. [You can check the status of the Item Deriver Queue here.](https://catalogd.archive.org/catalog.php?whereami=1) This queue has a limit, and it may block you from uploading, or even deleting. For better behavior, avoid uploading a lot of small files.

You can optionally wait for the server's processing to finish, by setting a non-zero value for the `wait_archive` key.
By making it wait, rclone can do normal file comparison.
Make sure to set a large enough value (e.g. `30m0s` for smaller files) as it can take a long time depending on the server's queue.

## Configuration

Here is an example of making an internetarchive configuration.
Most of this applies to the other providers as well; any differences are described [below](#providers).

First run

    rclone config

This will guide you through an interactive setup process.

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
name> remote
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
XX / InternetArchive Items
   \ (internetarchive)
Storage> internetarchive
Option access_key_id.
IAS3 Access Key.
Leave blank for anonymous access.
You can find one here: https://archive.org/account/s3.php
Enter a value. Press Enter to leave empty.
access_key_id> XXXX
Option secret_access_key.
IAS3 Secret Key (password).
Leave blank for anonymous access.
Enter a value. Press Enter to leave empty.
secret_access_key> XXXX
Edit advanced config?
y) Yes
n) No (default)
y/n> y
Option endpoint.
IAS3 Endpoint.
Leave blank for default value.
Enter a string value. Press Enter for the default (https://s3.us.archive.org).
endpoint>
Option front_endpoint.
Host of InternetArchive Frontend.
Leave blank for default value.
Enter a string value. Press Enter for the default (https://archive.org).
front_endpoint>
Option disable_checksum.
Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can ask the server to check the object against checksum.
This is great for data integrity checking but can cause long delays for
large files to start uploading.
Enter a boolean value (true or false). Press Enter for the default (true).
disable_checksum> true
Option encoding.
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Enter a encoder.MultiEncoder value. Press Enter for the default (Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot).
encoding>
Edit advanced config?
y) Yes
n) No (default)
y/n> n
--------------------
[remote]
type = internetarchive
access_key_id = XXXX
secret_access_key = XXXX
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/internetarchive/internetarchive.go then run make backenddocs" >}}
### Standard options

Here are the standard options specific to internetarchive (Internet Archive).

#### --internetarchive-access-key-id

IAS3 Access Key.

Leave blank for anonymous access.
You can find one here: https://archive.org/account/s3.php

Properties:

- Config: access_key_id
- Env Var: RCLONE_INTERNETARCHIVE_ACCESS_KEY_ID
- Type: string
- Required: false

#### --internetarchive-secret-access-key

IAS3 Secret Key (password).

Leave blank for anonymous access.

Properties:

- Config: secret_access_key
- Env Var: RCLONE_INTERNETARCHIVE_SECRET_ACCESS_KEY
- Type: string
- Required: false

### Advanced options

Here are the advanced options specific to internetarchive (Internet Archive).

#### --internetarchive-endpoint

IAS3 Endpoint.

Leave blank for default value.

Properties:

- Config: endpoint
- Env Var: RCLONE_INTERNETARCHIVE_ENDPOINT
- Type: string
- Default: "https://s3.us.archive.org"

#### --internetarchive-front-endpoint

Host of InternetArchive Frontend.

Leave blank for default value.

Properties:

- Config: front_endpoint
- Env Var: RCLONE_INTERNETARCHIVE_FRONT_ENDPOINT
- Type: string
- Default: "https://archive.org"

#### --internetarchive-disable-checksum

Don't ask the server to test against MD5 checksum calculated by rclone.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can ask the server to check the object against checksum.
This is great for data integrity checking but can cause long delays for
large files to start uploading.

Properties:

- Config: disable_checksum
- Env Var: RCLONE_INTERNETARCHIVE_DISABLE_CHECKSUM
- Type: bool
- Default: true

#### --internetarchive-wait-archive

Timeout for waiting for the server's processing tasks (specifically archive and book_op) to finish.
Only enable if you need writes to be guaranteed to be reflected after the operation returns.
Set to 0 to disable waiting. No error is thrown in case of timeout.

Properties:

- Config: wait_archive
- Env Var: RCLONE_INTERNETARCHIVE_WAIT_ARCHIVE
- Type: Duration
- Default: 0s

#### --internetarchive-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_INTERNETARCHIVE_ENCODING
- Type: MultiEncoder
- Default: Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot

{{< rem autogenerated options stop >}}
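Combining the options above, a hedged sketch of an upload that waits for the Internet Archive's processing queue so rclone can verify the result (the remote and item names are hypothetical):

```
rclone copy /home/local/directory remote:item --internetarchive-wait-archive 30m
```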
@@ -241,6 +241,8 @@ Here are the advanced options specific to jottacloud (Jottacloud).

Files bigger than this will be cached on disk to calculate the MD5 if required.

Properties:

- Config: md5_memory_limit
- Env Var: RCLONE_JOTTACLOUD_MD5_MEMORY_LIMIT
- Type: SizeSuffix

@@ -252,6 +254,8 @@ Only show files that are in the trash.

This will show trashed files in their original directory structure.

Properties:

- Config: trashed_only
- Env Var: RCLONE_JOTTACLOUD_TRASHED_ONLY
- Type: bool

@@ -261,6 +265,8 @@ This will show trashed files in their original directory structure.

Delete files permanently rather than putting them into the trash.

Properties:

- Config: hard_delete
- Env Var: RCLONE_JOTTACLOUD_HARD_DELETE
- Type: bool

@@ -270,6 +276,8 @@ Delete files permanently rather than putting them into the trash.

Files bigger than this can be resumed if the upload fails.

Properties:

- Config: upload_resume_limit
- Env Var: RCLONE_JOTTACLOUD_UPLOAD_RESUME_LIMIT
- Type: SizeSuffix

@@ -279,6 +287,8 @@ Files bigger than this can be resumed if the upload fails.

Avoid server side versioning by deleting files and recreating files instead of overwriting them.

Properties:

- Config: no_versions
- Env Var: RCLONE_JOTTACLOUD_NO_VERSIONS
- Type: bool

@@ -286,10 +296,12 @@ Avoid server side versioning by deleting files and recreating files instead of o

#### --jottacloud-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_JOTTACLOUD_ENCODING
- Type: MultiEncoder
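A minimal sketch of inspecting the trash with the `trashed_only` option above (the remote name is hypothetical):

```
# Show trashed files in their original directory structure
rclone ls jotta-remote: --jottacloud-trashed-only
```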
@@ -333,10 +333,12 @@ Here are the advanced options specific to local (Local Disk).

Disable UNC (long path names) conversion on Windows.

Properties:

- Config: nounc
- Env Var: RCLONE_LOCAL_NOUNC
- Type: string
- Default: ""
- Required: false
- Examples:
    - "true"
        - Disables long file names.

@@ -345,6 +347,8 @@ Disable UNC (long path names) conversion on Windows.

Follow symlinks and copy the pointed to item.

Properties:

- Config: copy_links
- Env Var: RCLONE_LOCAL_COPY_LINKS
- Type: bool

@@ -354,6 +358,8 @@ Follow symlinks and copy the pointed to item.

Translate symlinks to/from regular files with a '.rclonelink' extension.

Properties:

- Config: links
- Env Var: RCLONE_LOCAL_LINKS
- Type: bool

@@ -366,6 +372,8 @@ Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.

Properties:

- Config: skip_links
- Env Var: RCLONE_LOCAL_SKIP_LINKS
- Type: bool

@@ -384,6 +392,8 @@ Rclone used to use the Stat size of links as the link size, but this fails in qu
So rclone now always reads the link.

Properties:

- Config: zero_size_links
- Env Var: RCLONE_LOCAL_ZERO_SIZE_LINKS
- Type: bool

@@ -406,6 +416,8 @@ some OSes.
Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.

Properties:

- Config: unicode_normalization
- Env Var: RCLONE_LOCAL_UNICODE_NORMALIZATION
- Type: bool

@@ -440,6 +452,8 @@ time we:

Properties:

- Config: no_check_updated
- Env Var: RCLONE_LOCAL_NO_CHECK_UPDATED
- Type: bool

@@ -449,6 +463,8 @@ time we:

Don't cross filesystem boundaries (unix/macOS only).

Properties:

- Config: one_file_system
- Env Var: RCLONE_LOCAL_ONE_FILE_SYSTEM
- Type: bool

@@ -462,6 +478,8 @@ Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.

Properties:

- Config: case_sensitive
- Env Var: RCLONE_LOCAL_CASE_SENSITIVE
- Type: bool

@@ -475,6 +493,8 @@ Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.

Properties:

- Config: case_insensitive
- Env Var: RCLONE_LOCAL_CASE_INSENSITIVE
- Type: bool

@@ -490,6 +510,8 @@ Stream) may incorrectly set the actual file size equal to the
preallocated space, causing checksum and file size checks to fail.
Use this flag to disable preallocation.

Properties:

- Config: no_preallocate
- Env Var: RCLONE_LOCAL_NO_PREALLOCATE
- Type: bool

@@ -504,6 +526,8 @@ multi-thread downloads. This avoids long pauses on large files where
the OS zeros the file. However sparse files may be undesirable as they
cause disk fragmentation and can be slow to work with.

Properties:

- Config: no_sparse
- Env Var: RCLONE_LOCAL_NO_SPARSE
- Type: bool

@@ -519,6 +543,8 @@ the user rclone is running as does not own the file uploaded, such as
when copying to a CIFS mount owned by another user. If this option is
enabled, rclone will no longer update the modtime after copying a file.

Properties:

- Config: no_set_modtime
- Env Var: RCLONE_LOCAL_NO_SET_MODTIME
- Type: bool

@@ -526,10 +552,12 @@ enabled, rclone will no longer update the modtime after copying a file.

#### --local-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_LOCAL_ENCODING
- Type: MultiEncoder
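A minimal sketch of the symlink options above (paths and remote name are hypothetical): `--links` translates symlinks to `.rclonelink` files, while `--copy-links` follows them instead.

```
# Preserve symlinks as .rclonelink files on the destination
rclone sync /home/user/tree remote:backup --links
# Or dereference them and copy the pointed-to content
rclone sync /home/user/tree remote:backup --copy-links
```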
@@ -549,7 +577,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend/command).
[backend/command](/rc/#backend-command).

### noop
@@ -162,10 +162,12 @@ Here are the standard options specific to mailru (Mail.ru Cloud).

User name (usually email).

Properties:

- Config: user
- Env Var: RCLONE_MAILRU_USER
- Type: string
- Default: ""
- Required: true

#### --mailru-pass

@@ -173,10 +175,12 @@ Password.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: pass
- Env Var: RCLONE_MAILRU_PASS
- Type: string
- Default: ""
- Required: true

#### --mailru-speedup-enable

@@ -191,6 +195,8 @@ content hash in advance and decide whether full upload is required.
Also, if rclone does not know file size in advance (e.g. in case of
streaming or partial uploads), it will not even try this optimization.

Properties:

- Config: speedup_enable
- Env Var: RCLONE_MAILRU_SPEEDUP_ENABLE
- Type: bool

@@ -211,6 +217,8 @@ Comma separated list of file name patterns eligible for speedup (put by hash).

Patterns are case insensitive and can contain '*' or '?' meta characters.

Properties:

- Config: speedup_file_patterns
- Env Var: RCLONE_MAILRU_SPEEDUP_FILE_PATTERNS
- Type: string

@@ -231,6 +239,8 @@ This option allows you to disable speedup (put by hash) for large files.

The reason is that preliminary hashing can exhaust your RAM or disk space.

Properties:

- Config: speedup_max_disk
- Env Var: RCLONE_MAILRU_SPEEDUP_MAX_DISK
- Type: SizeSuffix

@@ -247,6 +257,8 @@ The reason is that preliminary hashing can exhaust your RAM or disk space.

Files larger than the size given below will always be hashed on disk.

Properties:

- Config: speedup_max_memory
- Env Var: RCLONE_MAILRU_SPEEDUP_MAX_MEMORY
- Type: SizeSuffix

@@ -263,6 +275,8 @@ Files larger than the size given below will always be hashed on disk.

What should copy do if file checksum is mismatched or invalid.

Properties:

- Config: check_hash
- Env Var: RCLONE_MAILRU_CHECK_HASH
- Type: bool

@@ -279,10 +293,12 @@ HTTP user agent used internally by client.

Defaults to "rclone/VERSION" or "--user-agent" provided on command line.

Properties:

- Config: user_agent
- Env Var: RCLONE_MAILRU_USER_AGENT
- Type: string
- Default: ""
- Required: false

#### --mailru-quirks

@@ -294,17 +310,21 @@ flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
Supported quirks: atomicmkdir binlist unknowndirs

Properties:

- Config: quirks
- Env Var: RCLONE_MAILRU_QUIRKS
- Type: string
- Default: ""
- Required: false

#### --mailru-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_MAILRU_ENCODING
- Type: MultiEncoder
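A hedged sketch of tuning the speedup (put by hash) options above for a single transfer (the remote name and file are hypothetical):

```
# Disable put-by-hash for this run, e.g. to avoid hashing a huge file on disk
rclone copy bigfile.iso mailru-remote:backup --mailru-speedup-enable=false
```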
@@ -160,10 +160,12 @@ Here are the standard options specific to mega (Mega).

User name.

Properties:

- Config: user
- Env Var: RCLONE_MEGA_USER
- Type: string
- Default: ""
- Required: true

#### --mega-pass

@@ -171,10 +173,12 @@ Password.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: pass
- Env Var: RCLONE_MEGA_PASS
- Type: string
- Default: ""
- Required: true

### Advanced options

@@ -187,6 +191,8 @@ Output more debug from Mega.
If this flag is set (along with -vv) it will print further debugging
information from the mega backend.

Properties:

- Config: debug
- Env Var: RCLONE_MEGA_DEBUG
- Type: bool

@@ -200,6 +206,8 @@ Normally the mega backend will put all deletions into the trash rather
than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.

Properties:

- Config: hard_delete
- Env Var: RCLONE_MEGA_HARD_DELETE
- Type: bool

@@ -207,10 +215,12 @@ permanently delete objects instead.

#### --mega-encoding

This sets the encoding for the backend.
The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_MEGA_ENCODING
- Type: MultiEncoder
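A minimal sketch of the `hard_delete` behaviour above (the remote name and path are hypothetical):

```
# Bypass the trash and delete permanently
rclone delete mega-remote:old-stuff --mega-hard-delete
```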
286 docs/content/netstorage.md Normal file

@@ -0,0 +1,286 @@
---
title: "Akamai Netstorage"
description: "Rclone docs for Akamai NetStorage"
---

# {{< icon "fas fa-database" >}} Akamai NetStorage

Paths are specified as `remote:`
You may put subdirectories in too, e.g. `remote:/path/to/dir`.
If you have a CP code you can use that as the folder after the domain such as \<domain>\/\<cpcode>\/\<internal directories within cpcode>.

For example, this is commonly configured with or without a CP code:
* **With a CP code**. `[your-domain-prefix]-nsu.akamaihd.net/123456/subdirectory/`
* **Without a CP code**. `[your-domain-prefix]-nsu.akamaihd.net`

See all buckets

    rclone lsd remote:

The initial setup for Netstorage involves getting an account and secret. Use `rclone config` to walk you through the setup process.

## Configuration

Here's an example of how to make a remote called `ns1`.

1. To begin the interactive configuration process, enter this command:

```
rclone config
```

2. Type `n` to create a new remote.

```
n) New remote
d) Delete remote
q) Quit config
e/n/d/q> n
```

3. For this example, enter `ns1` when you reach the name> prompt.

```
name> ns1
```

4. Enter `netstorage` as the type of storage to configure.

```
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
XX / NetStorage
   \ "netstorage"
Storage> netstorage
```

5. Select between the HTTP or HTTPS protocol. Most users should choose HTTPS, which is the default. HTTP is provided primarily for debugging purposes.

```
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / HTTP protocol
   \ "http"
2 / HTTPS protocol
   \ "https"
protocol> 1
```

6. Specify your NetStorage host, CP code, and any necessary content paths using this format: `<domain>/<cpcode>/<content>/`

```
Enter a string value. Press Enter for the default ("").
host> baseball-nsu.akamaihd.net/123456/content/
```

7. Set the netstorage account name

```
Enter a string value. Press Enter for the default ("").
account> username
```

8. Set the Netstorage account secret/G2O key which will be used for authentication purposes. Select the `y` option to set your own password then enter your secret.
Note: The secret is stored in the `rclone.conf` file with hex-encoded encryption.

```
y) Yes type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:
```

9. View the summary and confirm your remote configuration.

```
[ns1]
type = netstorage
protocol = http
host = baseball-nsu.akamaihd.net/123456/content/
account = username
secret = *** ENCRYPTED ***
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

This remote is called `ns1` and can now be used.

## Example operations

Get started with rclone and NetStorage with these examples. For additional rclone commands, visit https://rclone.org/commands/.

### See contents of a directory in your project

    rclone lsd ns1:/974012/testing/

### Sync local contents with the remote

    rclone sync . ns1:/974012/testing/

### Upload local content to remote

    rclone copy notes.txt ns1:/974012/testing/

### Delete content on remote

    rclone delete ns1:/974012/testing/notes.txt

### Move or copy content between CP codes

Your credentials must have access to two CP codes on the same remote. You can't perform operations between different remotes.

    rclone move ns1:/974012/testing/notes.txt ns1:/974450/testing2/

## Features

### Symlink Support

The Netstorage backend changes the rclone `--links, -l` behavior. When uploading, instead of creating the .rclonelink file, it uses the "symlink" API to create the corresponding symlink on the remote. The .rclonelink file will not be created; the upload will be intercepted and only the symlink file that matches the source file name with no suffix will be created on the remote.

This will effectively allow commands like copy/copyto, move/moveto and sync to upload from local to remote and download from remote to local directories with symlinks. Due to internal rclone limitations, it is not possible to upload an individual symlink file to any remote backend. You can always use the "backend symlink" command to create a symlink on the NetStorage server, refer to the "symlink" section below.

Individual symlink files on the remote can be used with commands like "cat" to print the destination name, "delete" to delete the symlink, or copy/copyto and move/moveto to download from the remote to local. Note: individual symlink files on the remote should be specified including the suffix .rclonelink.

**Note**: No file with the suffix .rclonelink should ever exist on the server since it is not possible to actually upload/create a file with a .rclonelink suffix with rclone; it can only exist if it is manually created through a non-rclone method on the remote.

### Implicit vs. Explicit Directories

With NetStorage, directories can exist in one of two forms:

1. **Explicit Directory**. This is an actual, physical directory that you have created in a storage group.
2. **Implicit Directory**. This refers to a directory within a path that has not been physically created. For example, during upload of a file, non-existent subdirectories can be specified in the target path. NetStorage creates these as "implicit." While the directories aren't physically created, they exist implicitly and the noted path is connected with the uploaded file.

Rclone will intercept all file uploads and mkdir commands for the NetStorage remote and will explicitly issue the mkdir command for each directory in the uploading path. This will help with the interoperability with the other Akamai services such as SFTP and the Content Management Shell (CMShell). Rclone will not guarantee correctness of operations with implicit directories which might have been created as a result of using an upload API directly.

### `--fast-list` / ListR support

NetStorage remote supports the ListR feature by using the "list" NetStorage API action to return a lexicographical list of all objects within the specified CP code, recursing into subdirectories as they're encountered.

* **Rclone will use the ListR method for some commands by default**. Commands such as `lsf -R` will use ListR by default. To disable this, include the `--disable listR` option to use the non-recursive method of listing objects.

* **Rclone will not use the ListR method for some commands**. Commands such as `sync` don't use ListR by default. To force using the ListR method, include the `--fast-list` option.

There are pros and cons of using the ListR method, refer to [rclone documentation](https://rclone.org/docs/#fast-list). In general, the sync command over an existing deep tree on the remote will run faster with the "--fast-list" flag but with extra memory usage as a side effect. It might also result in higher CPU utilization but the whole task can be completed faster.

**Note**: There is a known limitation that "lsf -R" will display the number of files in the directory and directory size as -1 when the ListR method is used. The workaround is to pass the "--disable listR" flag if these numbers are important in the output.

### Purge

NetStorage remote supports the purge feature by using the "quick-delete" NetStorage API action. The quick-delete action is disabled by default for security reasons and can be enabled for the account through the Akamai portal. Rclone will first try to use the quick-delete action for the purge command and, if this functionality is disabled, will fall back to a standard delete method.

**Note**: Read the [NetStorage Usage API](https://learn.akamai.com/en-us/webhelp/netstorage/netstorage-http-api-developer-guide/GUID-15836617-9F50-405A-833C-EA2556756A30.html) for considerations when using "quick-delete". In general, using the quick-delete method will not delete the tree immediately and objects targeted for quick-delete may still be accessible.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/netstorage/netstorage.go then run make backenddocs" >}}
### Standard options

Here are the standard options specific to netstorage (Akamai NetStorage).

#### --netstorage-host

Domain+path of NetStorage host to connect to.

Format should be `<domain>/<internal folders>`

Properties:

- Config: host
- Env Var: RCLONE_NETSTORAGE_HOST
- Type: string
- Required: true

#### --netstorage-account

Set the NetStorage account name

Properties:

- Config: account
- Env Var: RCLONE_NETSTORAGE_ACCOUNT
- Type: string
- Required: true

#### --netstorage-secret

Set the NetStorage account secret/G2O key for authentication.

Please choose the 'y' option to set your own password then enter your secret.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: secret
- Env Var: RCLONE_NETSTORAGE_SECRET
- Type: string
- Required: true

### Advanced options

Here are the advanced options specific to netstorage (Akamai NetStorage).

#### --netstorage-protocol

Select between HTTP or HTTPS protocol.

Most users should choose HTTPS, which is the default.
HTTP is provided primarily for debugging purposes.

Properties:

- Config: protocol
- Env Var: RCLONE_NETSTORAGE_PROTOCOL
- Type: string
- Default: "https"
- Examples:
    - "http"
        - HTTP protocol
    - "https"
        - HTTPS protocol

## Backend commands

Here are the commands specific to the netstorage backend.

Run them with

    rclone backend COMMAND remote:

The help below will explain what arguments each command takes.

See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).

### du

Return disk usage information for a specified directory

    rclone backend du remote: [options] [<arguments>+]

The usage information returned includes the targeted directory as well as all
files stored in any sub-directories that may exist.

### symlink

You can create a symbolic link in ObjectStore with the symlink action.

    rclone backend symlink remote: [options] [<arguments>+]

The desired path location (including applicable sub-directories) ending in
the object that will be the target of the symlink (for example, /links/mylink).
Include the file extension for the object, if applicable.
`rclone backend symlink <src> <path>`

## Support

If you have any questions or issues, please contact [Akamai Technical Support
via Control Center or by
phone](https://control.akamai.com/apps/support-ui/#/contact-support).

{{< rem autogenerated options stop >}}
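Tying the backend commands above to the `ns1` remote configured earlier, a hedged sketch (the paths are hypothetical, and the symlink arguments follow the `rclone backend symlink <src> <path>` form shown above):

```
# Disk usage for a directory, including all sub-directories
rclone backend du ns1:/974012/testing/
# Create a symlink on the remote pointing at an uploaded object
rclone backend symlink ns1:/974012/testing/notes.txt /974012/links/notes.txt
```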
@@ -204,10 +204,12 @@ OAuth Client Id.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_id
|
||||
- Env Var: RCLONE_ONEDRIVE_CLIENT_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --onedrive-client-secret
|
||||
|
||||
@@ -215,15 +217,19 @@ OAuth Client Secret.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: client_secret
|
||||
- Env Var: RCLONE_ONEDRIVE_CLIENT_SECRET
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --onedrive-region
|
||||
|
||||
Choose national cloud region for OneDrive.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: region
|
||||
- Env Var: RCLONE_ONEDRIVE_REGION
|
||||
- Type: string
|
||||
@@ -246,10 +252,12 @@ Here are the advanced options specific to onedrive (Microsoft OneDrive).
|
||||
|
||||
OAuth Access Token as a JSON blob.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token
|
||||
- Env Var: RCLONE_ONEDRIVE_TOKEN
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --onedrive-auth-url
|
||||
|
||||
@@ -257,10 +265,12 @@ Auth server URL.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: auth_url
|
||||
- Env Var: RCLONE_ONEDRIVE_AUTH_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --onedrive-token-url
|
||||
|
||||
@@ -268,10 +278,12 @@ Token server url.
|
||||
|
||||
Leave blank to use the provider defaults.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: token_url
|
||||
- Env Var: RCLONE_ONEDRIVE_TOKEN_URL
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --onedrive-chunk-size
|
||||
|
||||
@@ -281,6 +293,8 @@ Above this size files will be chunked - must be multiple of 320k (327,680 bytes)
|
||||
should not exceed 250M (262,144,000 bytes) else you may encounter \"Microsoft.SharePoint.Client.InvalidClientQueryException: The request message is too big.\"
|
||||
Note that the chunks will be buffered into memory.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: chunk_size
|
||||
- Env Var: RCLONE_ONEDRIVE_CHUNK_SIZE
|
||||
- Type: SizeSuffix
|
||||
@@ -290,30 +304,69 @@ Note that the chunks will be buffered into memory.
|
||||
|
||||
The ID of the drive to use.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: drive_id
|
||||
- Env Var: RCLONE_ONEDRIVE_DRIVE_ID
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --onedrive-drive-type
|
||||
|
||||
The type of the drive (personal | business | documentLibrary).
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: drive_type
|
||||
- Env Var: RCLONE_ONEDRIVE_DRIVE_TYPE
|
||||
- Type: string
|
||||
- Default: ""
|
||||
- Required: false
|
||||
|
||||
#### --onedrive-root-folder-id
|
||||
|
||||
ID of the root folder.
|
||||
|
||||
This isn't normally needed, but in special circumstances you might
|
||||
know the folder ID that you wish to access but not be able to get
|
||||
there through a path traversal.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: root_folder_id
|
||||
- Env Var: RCLONE_ONEDRIVE_ROOT_FOLDER_ID
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --onedrive-disable-site-permission
|
||||
|
||||
Disable the request for Sites.Read.All permission.
|
||||
|
||||
If set to true, you will no longer be able to search for a SharePoint site when
|
||||
configuring drive ID, because rclone will not request Sites.Read.All permission.
|
||||
Set it to true if your organization didn't assign Sites.Read.All permission to the
|
||||
application, and your organization disallows users to consent app permission
|
||||
request on their own.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: disable_site_permission
|
||||
- Env Var: RCLONE_ONEDRIVE_DISABLE_SITE_PERMISSION
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --onedrive-expose-onenote-files
|
||||
|
||||
Set to make OneNote files show up in directory listings.
|
||||
|
||||
By default rclone will hide OneNote files in directory listings because
|
||||
By default, rclone will hide OneNote files in directory listings because
|
||||
operations like "Open" and "Update" won't work on them. But this
|
||||
behaviour may also prevent you from deleting them. If you want to
|
||||
delete OneNote files or otherwise want them to show up in directory
|
||||
listing, set this option.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: expose_onenote_files
|
||||
- Env Var: RCLONE_ONEDRIVE_EXPOSE_ONENOTE_FILES
|
||||
- Type: bool
|
||||
@@ -327,6 +380,8 @@ This will only work if you are copying between two OneDrive *Personal* drives AN
|
||||
the files to copy are already shared between them. In other cases, rclone will
|
||||
fall back to normal copy (which will be slightly slower).
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: server_side_across_configs
|
||||
- Env Var: RCLONE_ONEDRIVE_SERVER_SIDE_ACROSS_CONFIGS
|
||||
- Type: bool
|
||||
@@ -336,6 +391,8 @@ fall back to normal copy (which will be slightly slower).
|
||||
|
||||
Size of listing chunk.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: list_chunk
|
||||
- Env Var: RCLONE_ONEDRIVE_LIST_CHUNK
|
||||
- Type: int
|
||||
@@ -357,6 +414,8 @@ modification time and removes all but the last version.

this flag there.

Properties:

- Config: no_versions
- Env Var: RCLONE_ONEDRIVE_NO_VERSIONS
- Type: bool
@@ -366,6 +425,8 @@ this flag there.

Set the scope of the links created by the link command.

Properties:

- Config: link_scope
- Env Var: RCLONE_ONEDRIVE_LINK_SCOPE
- Type: string
@@ -383,6 +444,8 @@ Set the scope of the links created by the link command.

Set the type of the links created by the link command.

Properties:

- Config: link_type
- Env Var: RCLONE_ONEDRIVE_LINK_TYPE
- Type: string
@@ -402,17 +465,21 @@ Set the password for links created by the link command.

At the time of writing this only works with OneDrive personal paid accounts.

Properties:

- Config: link_password
- Env Var: RCLONE_ONEDRIVE_LINK_PASSWORD
- Type: string
- Default: ""
- Required: false
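Putting the link options together, a hedged sketch; the path is a placeholder
and, as noted above, `link_password` needs a personal paid account:

```
# Create an organization-scoped, editable share link for one file.
rclone link onedrive:Documents/report.docx \
  --onedrive-link-scope organization \
  --onedrive-link-type edit
```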
#### --onedrive-encoding

-This sets the encoding for the backend.
+The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_ONEDRIVE_ENCODING
- Type: MultiEncoder
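A hedged sketch of overriding the default set; the names are drawn from the
encoding table later in this diff:

```
# Encode only the characters named here when mapping local names to OneDrive.
rclone lsf onedrive: --onedrive-encoding "Slash,LtGt,DoubleQuote,InvalidUtf8,Dot"
```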
@@ -562,7 +629,7 @@ are converted you will no longer need the ignore options above.

It is a [known](https://github.com/OneDrive/onedrive-api-docs/issues/1068) issue
that Sharepoint (not OneDrive or OneDrive for Business) may return "item not
found" errors when users try to replace or delete uploaded files; this seems to
-mainly affect Office files (.docx, .xlsx, etc.). As a workaround, you may use
+mainly affect Office files (.docx, .xlsx, etc.) and web files (.html, .aspx, etc.). As a workaround, you may use
the `--backup-dir <BACKUP_DIR>` command line argument so rclone moves the
files to be replaced/deleted into a given backup directory (instead of directly
replacing/deleting them). For example, to instruct rclone to move the files into
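A hedged sketch of such an invocation; the remote and backup directory names
are placeholders (note that the backup directory must live on the same
remote as the destination):

```
# Replaced/deleted files are moved into the backup directory rather than
# being replaced or deleted in place, dodging the "item not found" errors.
rclone sync /local/docs sharepoint:docs --backup-dir sharepoint:rclone-backup
```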
@@ -108,10 +108,12 @@ Here are the standard options specific to opendrive (OpenDrive).

Username.

Properties:

- Config: username
- Env Var: RCLONE_OPENDRIVE_USERNAME
- Type: string
- Default: ""
- Required: true

#### --opendrive-password

@@ -119,10 +121,12 @@ Password.

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: password
- Env Var: RCLONE_OPENDRIVE_PASSWORD
- Type: string
- Default: ""
- Required: true
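A minimal sketch of obscuring the password and wiring both options into a
config section by hand; the remote name and credentials are placeholders:

```
# Print the obscured form; the config stores this, never the plaintext.
rclone obscure 'my-plaintext-password'
```

```
[myopendrive]
type = opendrive
username = alice@example.com
password = <paste the rclone obscure output here>
```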
### Advanced options

@@ -130,10 +134,12 @@ Here are the advanced options specific to opendrive (OpenDrive).

#### --opendrive-encoding

-This sets the encoding for the backend.
+The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_OPENDRIVE_ENCODING
- Type: MultiEncoder

@@ -146,6 +152,8 @@ Files will be uploaded in chunks this size.

Note that these chunks are buffered in memory so increasing them will
increase memory use.

Properties:

- Config: chunk_size
- Env Var: RCLONE_OPENDRIVE_CHUNK_SIZE
- Type: SizeSuffix
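Because every in-flight transfer buffers a full chunk, a rough sketch of the
trade-off (the remote name is a placeholder):

```
# Four concurrent transfers at 50M chunks may hold around 4 x 50M of
# upload buffers in memory at once.
rclone copy /local/dir myopendrive:dir --opendrive-chunk-size 50M --transfers 4
```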
@@ -32,6 +32,7 @@ Here is an overview of the major features of each cloud storage system.

| HDFS | - | Yes | No | No | - |
| HTTP | - | No | No | No | R |
| Hubic | MD5 | Yes | No | No | R/W |
+| Internet Archive | MD5, SHA1, CRC32 | Yes | No | No | - |
| Jottacloud | MD5 | Yes | Yes | No | R |
| Koofr | MD5 | No | Yes | No | - |
| Mail.ru Cloud | Mailru ⁶ | Yes | Yes | No | - |
@@ -301,35 +302,36 @@ list of all possible values by passing an invalid value to this

flag, e.g. `--local-encoding "help"`. The command `rclone help flags encoding`
will show you the defaults for the backends.

-| Encoding | Characters |
-| --------- | ---------- |
-| Asterisk | `*` |
-| BackQuote | `` ` `` |
-| BackSlash | `\` |
-| Colon | `:` |
-| CrLf | CR 0x0D, LF 0x0A |
-| Ctl | All control characters 0x00-0x1F |
-| Del | DEL 0x7F |
-| Dollar | `$` |
-| Dot | `.` or `..` as entire string |
-| DoubleQuote | `"` |
-| Hash | `#` |
-| InvalidUtf8 | An invalid UTF-8 character (e.g. latin1) |
-| LeftCrLfHtVt | CR 0x0D, LF 0x0A,HT 0x09, VT 0x0B on the left of a string |
-| LeftPeriod | `.` on the left of a string |
-| LeftSpace | SPACE on the left of a string |
-| LeftTilde | `~` on the left of a string |
-| LtGt | `<`, `>` |
-| None | No characters are encoded |
-| Percent | `%` |
-| Pipe | \| |
-| Question | `?` |
-| RightCrLfHtVt | CR 0x0D, LF 0x0A, HT 0x09, VT 0x0B on the right of a string |
-| RightPeriod | `.` on the right of a string |
-| RightSpace | SPACE on the right of a string |
-| SingleQuote | `'` |
-| Slash | `/` |
-| SquareBracket | `[`, `]` |
+| Encoding | Characters | Encoded as |
+| --------- | ---------- | ---------- |
+| Asterisk | `*` | `＊` |
+| BackQuote | `` ` `` | `｀` |
+| BackSlash | `\` | `＼` |
+| Colon | `:` | `：` |
+| CrLf | CR 0x0D, LF 0x0A | `␍`, `␊` |
+| Ctl | All control characters 0x00-0x1F | `␀␁␂␃␄␅␆␇␈␉␊␋␌␍␎␏␐␑␒␓␔␕␖␗␘␙␚␛␜␝␞␟` |
+| Del | DEL 0x7F | `␡` |
+| Dollar | `$` | `＄` |
+| Dot | `.` or `..` as entire string | `．`, `．．` |
+| DoubleQuote | `"` | `＂` |
+| Hash | `#` | `＃` |
+| InvalidUtf8 | An invalid UTF-8 character (e.g. latin1) | `�` |
+| LeftCrLfHtVt | CR 0x0D, LF 0x0A, HT 0x09, VT 0x0B on the left of a string | `␍`, `␊`, `␉`, `␋` |
+| LeftPeriod | `.` on the left of a string | `．` |
+| LeftSpace | SPACE on the left of a string | `␠` |
+| LeftTilde | `~` on the left of a string | `～` |
+| LtGt | `<`, `>` | `＜`, `＞` |
+| None | No characters are encoded | |
+| Percent | `%` | `％` |
+| Pipe | \| | `｜` |
+| Question | `?` | `？` |
+| RightCrLfHtVt | CR 0x0D, LF 0x0A, HT 0x09, VT 0x0B on the right of a string | `␍`, `␊`, `␉`, `␋` |
+| RightPeriod | `.` on the right of a string | `．` |
+| RightSpace | SPACE on the right of a string | `␠` |
+| Semicolon | `;` | `；` |
+| SingleQuote | `'` | `＇` |
+| Slash | `/` | `／` |
+| SquareBracket | `[`, `]` | `［`, `］` |

##### Encoding example: FTP
@@ -426,6 +428,7 @@ upon backend-specific capabilities.

| HDFS | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes |
| HTTP | No | No | No | No | No | No | No | No | No | Yes |
| Hubic | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No |
+| Internet Archive | No | Yes | No | No | Yes | Yes | No | Yes | Yes | No |
| Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
| Mail.ru Cloud | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| Mega | Yes | No | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
@@ -441,7 +444,7 @@ upon backend-specific capabilities.

| Seafile | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
| SFTP | No | No | Yes | Yes | No | No | Yes | No | Yes | Yes |
| SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes |
-| Storj | Yes † | No | No | No | No | Yes | Yes | No | No | No |
+| Storj | Yes † | No | Yes | No | No | Yes | Yes | No | No | No |
| Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No |
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No | Yes | Yes |
| Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes | Yes |
@@ -145,10 +145,12 @@ OAuth Client Id.

Leave blank normally.

Properties:

- Config: client_id
- Env Var: RCLONE_PCLOUD_CLIENT_ID
- Type: string
- Default: ""
- Required: false

#### --pcloud-client-secret

@@ -156,10 +158,12 @@ OAuth Client Secret.

Leave blank normally.

Properties:

- Config: client_secret
- Env Var: RCLONE_PCLOUD_CLIENT_SECRET
- Type: string
- Default: ""
- Required: false

### Advanced options

@@ -169,10 +173,12 @@ Here are the advanced options specific to pcloud (Pcloud).

OAuth Access Token as a JSON blob.

Properties:

- Config: token
- Env Var: RCLONE_PCLOUD_TOKEN
- Type: string
- Default: ""
- Required: false

#### --pcloud-auth-url

@@ -180,10 +186,12 @@ Auth server URL.

Leave blank to use the provider defaults.

Properties:

- Config: auth_url
- Env Var: RCLONE_PCLOUD_AUTH_URL
- Type: string
- Default: ""
- Required: false

#### --pcloud-token-url

@@ -191,17 +199,21 @@ Token server url.

Leave blank to use the provider defaults.

Properties:

- Config: token_url
- Env Var: RCLONE_PCLOUD_TOKEN_URL
- Type: string
- Default: ""
- Required: false

#### --pcloud-encoding

-This sets the encoding for the backend.
+The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_PCLOUD_ENCODING
- Type: MultiEncoder

@@ -211,6 +223,8 @@ See the [encoding section in the overview](/overview/#encoding) for more info.

Fill in for rclone to use a non-root folder as its starting point.

Properties:

- Config: root_folder_id
- Env Var: RCLONE_PCLOUD_ROOT_FOLDER_ID
- Type: string

@@ -225,6 +239,8 @@ however you will need to set it by hand if you are using remote config

with rclone authorize.

Properties:

- Config: hostname
- Env Var: RCLONE_PCLOUD_HOSTNAME
- Type: string
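A hedged sketch of setting it by hand; the remote name is a placeholder and
`eapi.pcloud.com` is assumed to be pCloud's EU API hostname:

```
# Needed when the token came from remote config / rclone authorize,
# since rclone cannot detect the correct hostname afterwards.
rclone config update mypcloud hostname=eapi.pcloud.com
```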
@@ -113,10 +113,12 @@ API Key.

This is not normally used - use oauth instead.

Properties:

- Config: api_key
- Env Var: RCLONE_PREMIUMIZEME_API_KEY
- Type: string
- Default: ""
- Required: false

### Advanced options

@@ -124,10 +126,12 @@ Here are the advanced options specific to premiumizeme (premiumize.me).

#### --premiumizeme-encoding

-This sets the encoding for the backend.
+The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_PREMIUMIZEME_ENCODING
- Type: MultiEncoder
Some files were not shown because too many files have changed in this diff.