1
0
mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits

..

1 Commits

Author SHA1 Message Date
dependabot[bot]
a9adc7007e build(deps): bump DavidAnson/markdownlint-cli2-action from 20 to 21
Bumps [DavidAnson/markdownlint-cli2-action](https://github.com/davidanson/markdownlint-cli2-action) from 20 to 21.
- [Release notes](https://github.com/davidanson/markdownlint-cli2-action/releases)
- [Commits](https://github.com/davidanson/markdownlint-cli2-action/compare/v20...v21)

---
updated-dependencies:
- dependency-name: DavidAnson/markdownlint-cli2-action
  dependency-version: '21'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-11-17 22:07:50 +00:00
181 changed files with 16976 additions and 47532 deletions

View File

@@ -95,7 +95,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -216,7 +216,7 @@ jobs:
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -283,7 +283,7 @@ jobs:
run: govulncheck ./... run: govulncheck ./...
- name: Check Markdown format - name: Check Markdown format
uses: DavidAnson/markdownlint-cli2-action@v20 uses: DavidAnson/markdownlint-cli2-action@v21
with: with:
globs: | globs: |
CONTRIBUTING.md CONTRIBUTING.md
@@ -307,7 +307,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0

View File

@@ -52,7 +52,7 @@ jobs:
df -h . df -h .
- name: Checkout Repository - name: Checkout Repository
uses: actions/checkout@v6 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0

View File

@@ -30,7 +30,7 @@ jobs:
sudo rm -rf /usr/share/dotnet || true sudo rm -rf /usr/share/dotnet || true
df -h . df -h .
- name: Checkout master - name: Checkout master
uses: actions/checkout@v6 uses: actions/checkout@v5
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Build and publish docker plugin - name: Build and publish docker plugin

15185
MANUAL.html generated

File diff suppressed because it is too large Load Diff

17744
MANUAL.md generated

File diff suppressed because it is too large Load Diff

7900
MANUAL.txt generated

File diff suppressed because it is too large Load Diff

View File

@@ -21,7 +21,6 @@ This file describes how to make the various kinds of releases
- make doc - make doc
- git status - to check for new man pages - git add them - git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0" - git commit -a -v -m "Version v1.XX.0"
- make check
- make retag - make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails - git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin - git push --follow-tags origin

View File

@@ -1 +1 @@
v1.73.0 v1.72.0

View File

@@ -16,7 +16,6 @@ import (
_ "github.com/rclone/rclone/backend/compress" _ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/doi" _ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drime"
_ "github.com/rclone/rclone/backend/drive" _ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox" _ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier" _ "github.com/rclone/rclone/backend/fichier"

View File

@@ -86,56 +86,12 @@ var (
metadataMu sync.Mutex metadataMu sync.Mutex
) )
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"cache-control": {
Help: "Cache-Control header",
Type: "string",
Example: "no-cache",
},
"content-disposition": {
Help: "Content-Disposition header",
Type: "string",
Example: "inline",
},
"content-encoding": {
Help: "Content-Encoding header",
Type: "string",
Example: "gzip",
},
"content-language": {
Help: "Content-Language header",
Type: "string",
Example: "en-US",
},
"content-type": {
Help: "Content-Type header",
Type: "string",
Example: "text/plain",
},
"tier": {
Help: "Tier of the object",
Type: "string",
Example: "Hot",
ReadOnly: true,
},
"mtime": {
Help: "Time of last modification, read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
}
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
Name: "azureblob", Name: "azureblob",
Description: "Microsoft Azure Blob Storage", Description: "Microsoft Azure Blob Storage",
NewFs: NewFs, NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored as x-ms-meta- keys. Azure metadata keys are case insensitive and are always returned in lower case.`,
},
Options: []fs.Option{{ Options: []fs.Option{{
Name: "account", Name: "account",
Help: `Azure Storage Account Name. Help: `Azure Storage Account Name.
@@ -854,9 +810,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{ f.features = (&fs.Features{
ReadMimeType: true, ReadMimeType: true,
WriteMimeType: true, WriteMimeType: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
BucketBased: true, BucketBased: true,
BucketBasedRootOK: true, BucketBasedRootOK: true,
SetTier: true, SetTier: true,
@@ -1204,289 +1157,6 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
o.meta[modTimeKey] = modTime.Format(timeFormatOut) o.meta[modTimeKey] = modTime.Format(timeFormatOut)
} }
// parseXMsTags parses the value of the x-ms-tags header into a map.
// It expects comma-separated key=value pairs. Whitespace around keys and
// values is trimmed. Empty pairs and empty keys are rejected.
func parseXMsTags(s string) (map[string]string, error) {
if strings.TrimSpace(s) == "" {
return map[string]string{}, nil
}
out := make(map[string]string)
parts := strings.Split(s, ",")
for _, p := range parts {
p = strings.TrimSpace(p)
if p == "" {
continue
}
kv := strings.SplitN(p, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("invalid tag %q", p)
}
k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])
if k == "" {
return nil, fmt.Errorf("invalid tag key in %q", p)
}
out[k] = v
}
return out, nil
}
// mapMetadataToAzure maps a generic metadata map to Azure HTTP headers,
// user metadata, tags and optional modTime override.
// Reserved x-ms-* keys (except x-ms-tags) are ignored for user metadata.
//
// Pass a logger to surface non-fatal parsing issues (e.g. bad mtime).
func mapMetadataToAzure(meta map[string]string, logf func(string, ...any)) (headers blob.HTTPHeaders, userMeta map[string]*string, tags map[string]string, modTime *time.Time, err error) {
if meta == nil {
return headers, nil, nil, nil, nil
}
tmp := make(map[string]string)
for k, v := range meta {
lowerKey := strings.ToLower(k)
switch lowerKey {
case "cache-control":
headers.BlobCacheControl = pString(v)
case "content-disposition":
headers.BlobContentDisposition = pString(v)
case "content-encoding":
headers.BlobContentEncoding = pString(v)
case "content-language":
headers.BlobContentLanguage = pString(v)
case "content-type":
headers.BlobContentType = pString(v)
case "x-ms-tags":
parsed, perr := parseXMsTags(v)
if perr != nil {
return headers, nil, nil, nil, perr
}
// allocate only if there are tags
if len(parsed) > 0 {
tags = parsed
}
case "mtime":
// Accept multiple layouts for tolerance
var parsed time.Time
var pErr error
for _, layout := range []string{time.RFC3339Nano, time.RFC3339, timeFormatOut} {
parsed, pErr = time.Parse(layout, v)
if pErr == nil {
modTime = &parsed
break
}
}
// Log and ignore if unparseable
if modTime == nil && logf != nil {
logf("metadata: couldn't parse mtime %q: %v", v, pErr)
}
case "tier":
// ignore - handled elsewhere
default:
// Filter out other reserved headers so they don't end up as user metadata
if strings.HasPrefix(lowerKey, "x-ms-") {
continue
}
tmp[lowerKey] = v
}
}
userMeta = toAzureMetaPtr(tmp)
return headers, userMeta, tags, modTime, nil
}
// toAzureMetaPtr converts a map[string]string to map[string]*string as used by Azure SDK
func toAzureMetaPtr(in map[string]string) map[string]*string {
if len(in) == 0 {
return nil
}
out := make(map[string]*string, len(in))
for k, v := range in {
vv := v
out[k] = &vv
}
return out
}
// assembleCopyParams prepares headers, metadata and tags for copy operations.
//
// It starts from the source properties, optionally overlays mapped metadata
// from rclone's metadata options, ensures mtime presence when mapping is
// enabled, and returns whether mapping was actually requested (hadMapping).
// assembleCopyParams prepares headers, metadata and tags for copy operations.
//
// If includeBaseMeta is true, start user metadata from the source's metadata
// and overlay mapped values. This matches multipart copy commit behavior.
// If false, only include mapped user metadata (no source baseline) which
// matches previous singlepart StartCopyFromURL semantics.
func assembleCopyParams(ctx context.Context, f *Fs, src fs.Object, srcProps *blob.GetPropertiesResponse, includeBaseMeta bool) (headers blob.HTTPHeaders, meta map[string]*string, tags map[string]string, hadMapping bool, err error) {
// Start from source properties
headers = blob.HTTPHeaders{
BlobCacheControl: srcProps.CacheControl,
BlobContentDisposition: srcProps.ContentDisposition,
BlobContentEncoding: srcProps.ContentEncoding,
BlobContentLanguage: srcProps.ContentLanguage,
BlobContentMD5: srcProps.ContentMD5,
BlobContentType: srcProps.ContentType,
}
// Optionally deep copy user metadata pointers from source. Normalise keys to
// lower-case to avoid duplicate x-ms-meta headers when we later inject/overlay
// metadata (Azure treats keys case-insensitively but Go's http.Header will
// join duplicate keys into a comma separated list, which breaks shared-key
// signing).
if includeBaseMeta && len(srcProps.Metadata) > 0 {
meta = make(map[string]*string, len(srcProps.Metadata))
for k, v := range srcProps.Metadata {
if v != nil {
vv := *v
meta[strings.ToLower(k)] = &vv
}
}
}
// Only consider mapping if metadata pipeline is enabled
if fs.GetConfig(ctx).Metadata {
mapped, mapErr := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
if mapErr != nil {
return headers, meta, nil, false, fmt.Errorf("failed to map metadata: %w", mapErr)
}
if mapped != nil {
// Map rclone metadata to Azure shapes
mappedHeaders, userMeta, mappedTags, mappedModTime, herr := mapMetadataToAzure(mapped, func(format string, args ...any) { fs.Debugf(f, format, args...) })
if herr != nil {
return headers, meta, nil, false, fmt.Errorf("metadata mapping: %w", herr)
}
hadMapping = true
// Overlay headers (only non-nil)
if mappedHeaders.BlobCacheControl != nil {
headers.BlobCacheControl = mappedHeaders.BlobCacheControl
}
if mappedHeaders.BlobContentDisposition != nil {
headers.BlobContentDisposition = mappedHeaders.BlobContentDisposition
}
if mappedHeaders.BlobContentEncoding != nil {
headers.BlobContentEncoding = mappedHeaders.BlobContentEncoding
}
if mappedHeaders.BlobContentLanguage != nil {
headers.BlobContentLanguage = mappedHeaders.BlobContentLanguage
}
if mappedHeaders.BlobContentType != nil {
headers.BlobContentType = mappedHeaders.BlobContentType
}
// Overlay user metadata
if len(userMeta) > 0 {
if meta == nil {
meta = make(map[string]*string, len(userMeta))
}
for k, v := range userMeta {
meta[k] = v
}
}
// Apply tags if any
if len(mappedTags) > 0 {
tags = mappedTags
}
// Ensure mtime present using mapped or source time
if _, ok := meta[modTimeKey]; !ok {
when := src.ModTime(ctx)
if mappedModTime != nil {
when = *mappedModTime
}
val := when.Format(time.RFC3339Nano)
if meta == nil {
meta = make(map[string]*string, 1)
}
meta[modTimeKey] = &val
}
// Ensure content-type fallback to source if not set by mapper
if headers.BlobContentType == nil {
headers.BlobContentType = srcProps.ContentType
}
} else {
// Mapping enabled but not provided: ensure mtime present based on source ModTime
if _, ok := meta[modTimeKey]; !ok {
when := src.ModTime(ctx)
val := when.Format(time.RFC3339Nano)
if meta == nil {
meta = make(map[string]*string, 1)
}
meta[modTimeKey] = &val
}
}
}
return headers, meta, tags, hadMapping, nil
}
// applyMappedMetadata applies mapped metadata and headers to the object state for uploads.
//
// It reads `--metadata`, `--metadata-set`, and `--metadata-mapper` outputs via fs.GetMetadataOptions
// and updates o.meta, o.tags and ui.httpHeaders accordingly.
func (o *Object) applyMappedMetadata(ctx context.Context, src fs.ObjectInfo, ui *uploadInfo, options []fs.OpenOption) (modTime time.Time, err error) {
// Start from the source modtime; may be overridden by metadata
modTime = src.ModTime(ctx)
// Fetch mapped metadata if --metadata is enabled
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return modTime, err
}
if meta == nil {
// No metadata processing requested
return modTime, nil
}
// Map metadata using common helper
headers, userMeta, tags, mappedModTime, err := mapMetadataToAzure(meta, func(format string, args ...any) { fs.Debugf(o, format, args...) })
if err != nil {
return modTime, err
}
// Merge headers into ui
if headers.BlobCacheControl != nil {
ui.httpHeaders.BlobCacheControl = headers.BlobCacheControl
}
if headers.BlobContentDisposition != nil {
ui.httpHeaders.BlobContentDisposition = headers.BlobContentDisposition
}
if headers.BlobContentEncoding != nil {
ui.httpHeaders.BlobContentEncoding = headers.BlobContentEncoding
}
if headers.BlobContentLanguage != nil {
ui.httpHeaders.BlobContentLanguage = headers.BlobContentLanguage
}
if headers.BlobContentType != nil {
ui.httpHeaders.BlobContentType = headers.BlobContentType
}
// Apply user metadata to o.meta with a single critical section
if len(userMeta) > 0 {
metadataMu.Lock()
if o.meta == nil {
o.meta = make(map[string]string, len(userMeta))
}
for k, v := range userMeta {
if v != nil {
o.meta[k] = *v
}
}
metadataMu.Unlock()
}
// Apply tags
if len(tags) > 0 {
if o.tags == nil {
o.tags = make(map[string]string, len(tags))
}
for k, v := range tags {
o.tags[k] = v
}
}
if mappedModTime != nil {
modTime = *mappedModTime
}
return modTime, nil
}
// Returns whether file is a directory marker or not // Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool { func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
// Directory markers are 0 length // Directory markers are 0 length
@@ -2281,19 +1951,18 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
return nil, err return nil, err
} }
// Prepare metadata/headers/tags for destination // Convert metadata from source object
// For multipart commit, include base metadata from source then overlay mapped
commitHeaders, commitMeta, commitTags, _, err := assembleCopyParams(ctx, f, src, srcProperties, true)
if err != nil {
return nil, fmt.Errorf("multipart copy: %w", err)
}
// Convert metadata from source or mapper
options := blockblob.CommitBlockListOptions{ options := blockblob.CommitBlockListOptions{
Metadata: commitMeta, Metadata: srcProperties.Metadata,
Tags: commitTags, Tier: parseTier(f.opt.AccessTier),
Tier: parseTier(f.opt.AccessTier), HTTPHeaders: &blob.HTTPHeaders{
HTTPHeaders: &commitHeaders, BlobCacheControl: srcProperties.CacheControl,
BlobContentDisposition: srcProperties.ContentDisposition,
BlobContentEncoding: srcProperties.ContentEncoding,
BlobContentLanguage: srcProperties.ContentLanguage,
BlobContentMD5: srcProperties.ContentMD5,
BlobContentType: srcProperties.ContentType,
},
} }
// Finalise the upload session // Finalise the upload session
@@ -2324,36 +1993,10 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
return nil, fmt.Errorf("single part copy: source auth: %w", err) return nil, fmt.Errorf("single part copy: source auth: %w", err)
} }
// Prepare mapped metadata/tags/headers if requested // Start the copy
options := blob.StartCopyFromURLOptions{ options := blob.StartCopyFromURLOptions{
Tier: parseTier(f.opt.AccessTier), Tier: parseTier(f.opt.AccessTier),
} }
var postHeaders *blob.HTTPHeaders
// Read source properties and assemble params; this also handles the case when mapping is disabled
srcProps, err := src.readMetaDataAlways(ctx)
if err != nil {
return nil, fmt.Errorf("single part copy: read source properties: %w", err)
}
// For singlepart copy, do not include base metadata from source in StartCopyFromURL
headers, meta, tags, hadMapping, aerr := assembleCopyParams(ctx, f, src, srcProps, false)
if aerr != nil {
return nil, fmt.Errorf("single part copy: %w", aerr)
}
// Apply tags and post-copy headers only when mapping requested changes
if len(tags) > 0 {
options.BlobTags = make(map[string]string, len(tags))
for k, v := range tags {
options.BlobTags[k] = v
}
}
if hadMapping {
// Only set metadata explicitly when mapping was requested; otherwise
// let the service copy source metadata (including mtime) automatically.
if len(meta) > 0 {
options.Metadata = meta
}
postHeaders = &headers
}
var startCopy blob.StartCopyFromURLResponse var startCopy blob.StartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options) startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options)
@@ -2383,16 +2026,6 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
pollTime = min(2*pollTime, time.Second) pollTime = min(2*pollTime, time.Second)
} }
// If mapper requested header changes, set them post-copy
if postHeaders != nil {
blb := f.getBlobSVC(dstContainer, dstPath)
_, setErr := blb.SetHTTPHeaders(ctx, *postHeaders, nil)
if setErr != nil {
return nil, fmt.Errorf("single part copy: failed to set headers: %w", setErr)
}
}
// Metadata (when requested) is set via StartCopyFromURL options.Metadata
return f.NewObject(ctx, remote) return f.NewObject(ctx, remote)
} }
@@ -2524,35 +2157,6 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
return metadata return metadata
} }
// Metadata returns metadata for an object
//
// It returns a combined view of system and user metadata.
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
// Ensure metadata is loaded
if err := o.readMetaData(ctx); err != nil {
return nil, err
}
m := fs.Metadata{}
// System metadata we expose
if !o.modTime.IsZero() {
m["mtime"] = o.modTime.Format(time.RFC3339Nano)
}
if o.accessTier != "" {
m["tier"] = string(o.accessTier)
}
// Merge user metadata (already lower-cased keys)
metadataMu.Lock()
for k, v := range o.meta {
m[k] = v
}
metadataMu.Unlock()
return m, nil
}
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in // decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
// //
// Sets // Sets
@@ -3391,18 +2995,16 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
// containerPath = containerPath[:len(containerPath)-1] // containerPath = containerPath[:len(containerPath)-1]
// } // }
// Start with default content-type based on source // Update Mod time
ui.httpHeaders = blob.HTTPHeaders{ o.updateMetadataWithModTime(src.ModTime(ctx))
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Apply mapped metadata/headers/tags if requested
modTime, err := o.applyMappedMetadata(ctx, src, &ui, options)
if err != nil { if err != nil {
return ui, err return ui, err
} }
// Ensure mtime is set in metadata based on possibly overridden modTime
o.updateMetadataWithModTime(modTime) // Create the HTTP headers for the upload
ui.httpHeaders = blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Compute the Content-MD5 of the file. As we stream all uploads it // Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header

View File

@@ -5,16 +5,11 @@ package azureblob
import ( import (
"context" "context"
"encoding/base64" "encoding/base64"
"fmt"
"net/http"
"strings" "strings"
"testing" "testing"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
@@ -153,417 +148,4 @@ func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
func (f *Fs) InternalTest(t *testing.T) { func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures) t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks) t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
t.Run("Metadata", f.testMetadataPaths)
}
// helper to read blob properties for an object
func getProps(ctx context.Context, t *testing.T, o fs.Object) *blob.GetPropertiesResponse {
ao := o.(*Object)
props, err := ao.readMetaDataAlways(ctx)
require.NoError(t, err)
return props
}
// helper to assert select headers and user metadata
func assertHeadersAndMetadata(t *testing.T, props *blob.GetPropertiesResponse, want map[string]string, wantUserMeta map[string]string) {
// Headers
get := func(p *string) string {
if p == nil {
return ""
}
return *p
}
if v, ok := want["content-type"]; ok {
assert.Equal(t, v, get(props.ContentType), "content-type")
}
if v, ok := want["cache-control"]; ok {
assert.Equal(t, v, get(props.CacheControl), "cache-control")
}
if v, ok := want["content-disposition"]; ok {
assert.Equal(t, v, get(props.ContentDisposition), "content-disposition")
}
if v, ok := want["content-encoding"]; ok {
assert.Equal(t, v, get(props.ContentEncoding), "content-encoding")
}
if v, ok := want["content-language"]; ok {
assert.Equal(t, v, get(props.ContentLanguage), "content-language")
}
// User metadata (case-insensitive keys from service)
norm := make(map[string]*string, len(props.Metadata))
for kk, vv := range props.Metadata {
norm[strings.ToLower(kk)] = vv
}
for k, v := range wantUserMeta {
pv, ok := norm[strings.ToLower(k)]
if assert.True(t, ok, fmt.Sprintf("missing user metadata key %q", k)) {
if pv == nil {
assert.Equal(t, v, "", k)
} else {
assert.Equal(t, v, *pv, k)
}
} else {
// Log available keys for diagnostics
keys := make([]string, 0, len(props.Metadata))
for kk := range props.Metadata {
keys = append(keys, kk)
}
t.Logf("available user metadata keys: %v", keys)
}
}
}
// helper to read blob tags for an object
func getTagsMap(ctx context.Context, t *testing.T, o fs.Object) map[string]string {
ao := o.(*Object)
blb := ao.getBlobSVC()
resp, err := blb.GetTags(ctx, nil)
require.NoError(t, err)
out := make(map[string]string)
for _, tag := range resp.BlobTagSet {
if tag.Key != nil {
k := *tag.Key
v := ""
if tag.Value != nil {
v = *tag.Value
}
out[k] = v
}
}
return out
}
// Test metadata across different write paths
func (f *Fs) testMetadataPaths(t *testing.T) {
ctx := context.Background()
if testing.Short() {
t.Skip("skipping in short mode")
}
// Common expected metadata and headers
baseMeta := fs.Metadata{
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
// Note: Don't set content-encoding here to avoid download decoding differences
// We will set a custom user metadata key
"potato": "royal",
// and modtime
"mtime": fstest.Time("2009-05-06T04:05:06.499999999Z").Format(time.RFC3339Nano),
}
// Singlepart upload
t.Run("PutSinglepart", func(t *testing.T) {
// size less than chunk size
contents := random.String(int(f.opt.ChunkSize / 2))
item := fstest.NewItem("meta-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
// override content-type via metadata mapping
meta := fs.Metadata{}
meta.Merge(baseMeta)
meta["content-type"] = "text/plain"
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
defer func() { _ = obj.Remove(ctx) }()
props := getProps(ctx, t, obj)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
}, map[string]string{
"potato": "royal",
})
_ = http.StatusOK // keep import for parity but don't inspect RawResponse
})
// Multipart upload
t.Run("PutMultipart", func(t *testing.T) {
// size greater than chunk size to force multipart
contents := random.String(int(f.opt.ChunkSize + 1024))
item := fstest.NewItem("meta-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{}
meta.Merge(baseMeta)
meta["content-type"] = "application/json"
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
defer func() { _ = obj.Remove(ctx) }()
props := getProps(ctx, t, obj)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "application/json",
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
}, map[string]string{
"potato": "royal",
})
// Tags: Singlepart upload
t.Run("PutSinglepartTags", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize / 2))
item := fstest.NewItem("tags-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{
"x-ms-tags": "env=dev,team=sync",
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/plain", meta)
defer func() { _ = obj.Remove(ctx) }()
tags := getTagsMap(ctx, t, obj)
assert.Equal(t, "dev", tags["env"])
assert.Equal(t, "sync", tags["team"])
})
// Tags: Multipart upload
t.Run("PutMultipartTags", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize + 2048))
item := fstest.NewItem("tags-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{
"x-ms-tags": "project=alpha,release=2025-08",
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "application/octet-stream", meta)
defer func() { _ = obj.Remove(ctx) }()
tags := getTagsMap(ctx, t, obj)
assert.Equal(t, "alpha", tags["project"])
assert.Equal(t, "2025-08", tags["release"])
})
})
// Singlepart copy with metadata-set mapping; omit content-type to exercise fallback
t.Run("CopySinglepart", func(t *testing.T) {
// create small source
contents := random.String(int(f.opt.ChunkSize / 2))
srcItem := fstest.NewItem("meta-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
defer func() { _ = srcObj.Remove(ctx) }()
// set mapping via MetadataSet
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"cache-control": "private, max-age=60",
"content-disposition": "attachment; filename=foo.txt",
"content-language": "fr",
// no content-type: should fallback to source
"potato": "maris",
}
// do copy
dstName := "meta-copy-single-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
// content-type should fallback to source (text/plain)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
"cache-control": "private, max-age=60",
"content-disposition": "attachment; filename=foo.txt",
"content-language": "fr",
}, map[string]string{
"potato": "maris",
})
// mtime should be populated on copy when --metadata is used
// and should equal the source ModTime (RFC3339Nano)
// Read user metadata (case-insensitive)
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
// parse and compare times ignoring formatting differences
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// CopySinglepart with only --metadata (no MetadataSet) must inject mtime and preserve src content-type
t.Run("CopySinglepart_MetadataOnly", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize / 2))
srcItem := fstest.NewItem("meta-copy-single-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
defer func() { _ = srcObj.Remove(ctx) }()
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
dstName := "meta-copy-single-only-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
}, map[string]string{})
// Assert mtime injected
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// Multipart copy with metadata-set mapping; omit content-type to exercise fallback
t.Run("CopyMultipart", func(t *testing.T) {
// create large source to force multipart
contents := random.String(int(f.opt.CopyCutoff + 1024))
srcItem := fstest.NewItem("meta-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
defer func() { _ = srcObj.Remove(ctx) }()
// set mapping via MetadataSet
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"cache-control": "max-age=0, no-cache",
// omit content-type to trigger fallback
"content-language": "de",
"potato": "desiree",
}
dstName := "meta-copy-multi-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
// content-type should fallback to source (application/octet-stream)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "application/octet-stream",
"cache-control": "max-age=0, no-cache",
"content-language": "de",
}, map[string]string{
"potato": "desiree",
})
// mtime should be populated on copy when --metadata is used
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
	// CopyMultipart with only --metadata must inject mtime and preserve src content-type
	t.Run("CopyMultipart_MetadataOnly", func(t *testing.T) {
		// source above CopyCutoff to force the multipart copy path
		contents := random.String(int(f.opt.CopyCutoff + 2048))
		srcItem := fstest.NewItem("meta-copy-multi-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
		defer func() { _ = srcObj.Remove(ctx) }()
		// enable --metadata with no --metadata-set mapping at all
		ctx2, ci := fs.AddConfig(ctx)
		ci.Metadata = true
		dstName := "meta-copy-multi-only-dst.txt"
		dst, err := f.Copy(ctx2, srcObj, dstName)
		require.NoError(t, err)
		defer func() { _ = dst.Remove(ctx2) }()
		props := getProps(ctx2, t, dst)
		// content type must be carried over from the source unchanged
		assertHeadersAndMetadata(t, props, map[string]string{
			"content-type": "application/octet-stream",
		}, map[string]string{})
		// mtime must still be injected into the destination user metadata
		m := props.Metadata
		var gotMtime string
		for k, v := range m {
			if strings.EqualFold(k, "mtime") && v != nil {
				gotMtime = *v
				break
			}
		}
		if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
			parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
			require.NoError(t, err)
			assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
		}
	})
	// Tags: Singlepart copy
	// Verifies x-ms-tags supplied via --metadata-set are applied on a
	// small (single-part) server-side copy.
	t.Run("CopySinglepartTags", func(t *testing.T) {
		// create small source
		contents := random.String(int(f.opt.ChunkSize / 2))
		srcItem := fstest.NewItem("tags-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
		defer func() { _ = srcObj.Remove(ctx) }()
		// set mapping via MetadataSet including tags
		ctx2, ci := fs.AddConfig(ctx)
		ci.Metadata = true
		ci.MetadataSet = fs.Metadata{
			"x-ms-tags": "copy=single,mode=test",
		}
		dstName := "tags-copy-single-dst.txt"
		dst, err := f.Copy(ctx2, srcObj, dstName)
		require.NoError(t, err)
		defer func() { _ = dst.Remove(ctx2) }()
		// both tag pairs must be present on the destination blob
		tags := getTagsMap(ctx2, t, dst)
		assert.Equal(t, "single", tags["copy"])
		assert.Equal(t, "test", tags["mode"])
	})
	// Tags: Multipart copy
	// Verifies x-ms-tags supplied via --metadata-set survive a large
	// (above CopyCutoff, hence multipart) server-side copy.
	t.Run("CopyMultipartTags", func(t *testing.T) {
		// create large source to force multipart
		contents := random.String(int(f.opt.CopyCutoff + 4096))
		srcItem := fstest.NewItem("tags-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
		srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
		defer func() { _ = srcObj.Remove(ctx) }()
		// enable --metadata and map the blob tags via --metadata-set
		ctx2, ci := fs.AddConfig(ctx)
		ci.Metadata = true
		ci.MetadataSet = fs.Metadata{
			"x-ms-tags": "copy=multi,mode=test",
		}
		dstName := "tags-copy-multi-dst.txt"
		dst, err := f.Copy(ctx2, srcObj, dstName)
		require.NoError(t, err)
		defer func() { _ = dst.Remove(ctx2) }()
		// both tag pairs must be present on the destination blob
		tags := getTagsMap(ctx2, t, dst)
		assert.Equal(t, "multi", tags["copy"])
		assert.Equal(t, "test", tags["mode"])
	})
	// Negative: invalid x-ms-tags must error
	// A tags value without "key=value" form is rejected at upload time
	// rather than being silently dropped.
	t.Run("InvalidXMsTags", func(t *testing.T) {
		contents := random.String(32)
		item := fstest.NewItem("tags-invalid.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
		// construct ObjectInfo with invalid x-ms-tags
		buf := strings.NewReader(contents)
		// Build obj info with metadata
		meta := fs.Metadata{
			"x-ms-tags": "badpair-without-equals",
		}
		// force metadata on
		ctx2, ci := fs.AddConfig(ctx)
		ci.Metadata = true
		obji := object.NewStaticObjectInfo(item.Path, item.ModTime, int64(len(contents)), true, nil, nil)
		obji = obji.WithMetadata(meta).WithMimeType("text/plain")
		_, err := f.Put(ctx2, buf, obji)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "invalid tag")
	})
} }

View File

@@ -133,32 +133,23 @@ type File struct {
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file. Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
} }
// StorageAPI is as returned from the b2_authorize_account call // AuthorizeAccountResponse is as returned from the b2_authorize_account call
type StorageAPI struct { type AuthorizeAccountResponse struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file. AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it. Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
Buckets []struct { // When present, access is restricted to one or more buckets. BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
ID string `json:"id"` // ID of bucket BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Name string `json:"name"` // When present, name of bucket - may be empty Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
} `json:"buckets"`
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"` } `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files. APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files. DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead. MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance. RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
} }
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIs struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
Storage StorageAPI `json:"storageApi"`
} `json:"apiInfo"`
}
// ListBucketsRequest is parameters for b2_list_buckets call // ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct { type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account. AccountID string `json:"accountId"` // The identifier for the account.

View File

@@ -607,29 +607,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to authorize account: %w", err) return nil, fmt.Errorf("failed to authorize account: %w", err)
} }
// If this is a key limited to one or more buckets, one of them must exist // If this is a key limited to a single bucket, it must exist already
// and be ours. if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 { allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
buckets := f.info.APIs.Storage.Allowed.Buckets if allowedBucket == "" {
var rootFound = false return nil, errors.New("bucket that application key is restricted to no longer exists")
var rootID string
for _, b := range buckets {
allowedBucket := f.opt.Enc.ToStandardName(b.Name)
if allowedBucket == "" {
fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
continue
}
if allowedBucket == f.rootBucket {
rootFound = true
rootID = b.ID
}
} }
if !rootFound { if allowedBucket != f.rootBucket {
return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets) return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
} }
f.cache.MarkOK(f.rootBucket) f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, rootID) f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
} }
if f.rootBucket != "" && f.rootDirectory != "" { if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the (bucket,directory) is actually an existing file // Check to see if the (bucket,directory) is actually an existing file
@@ -655,7 +643,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
defer f.authMu.Unlock() defer f.authMu.Unlock()
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: "/b2api/v4/b2_authorize_account", Path: "/b2api/v1/b2_authorize_account",
RootURL: f.opt.Endpoint, RootURL: f.opt.Endpoint,
UserName: f.opt.Account, UserName: f.opt.Account,
Password: f.opt.Key, Password: f.opt.Key,
@@ -668,13 +656,13 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
if err != nil { if err != nil {
return fmt.Errorf("failed to authenticate: %w", err) return fmt.Errorf("failed to authenticate: %w", err)
} }
f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken) f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil return nil
} }
// hasPermission returns if the current AuthorizationToken has the selected permission // hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool { func (f *Fs) hasPermission(permission string) bool {
return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission) return slices.Contains(f.info.Allowed.Capabilities, permission)
} }
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -1079,68 +1067,44 @@ type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied // listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error { func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0] var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
for i := range f.info.APIs.Storage.Allowed.Buckets { BucketID: f.info.Allowed.BucketID,
b := &f.info.APIs.Storage.Allowed.Buckets[i] }
// Empty names indicate a bucket that no longer exists, this is non-fatal if bucketName != "" && account.BucketID == "" {
// for multi-bucket API keys. account.BucketName = f.opt.Enc.FromStandardName(bucketName)
if b.Name == "" {
continue
}
// When requesting a specific bucket skip over non-matching names
if bucketName != "" && b.Name != bucketName {
continue
}
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: b.ID,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_buckets",
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
responses = append(responses, response)
} }
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_buckets",
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
f.bucketIDMutex.Lock() f.bucketIDMutex.Lock()
f.bucketTypeMutex.Lock() f.bucketTypeMutex.Lock()
f._bucketID = make(map[string]string, 1) f._bucketID = make(map[string]string, 1)
f._bucketType = make(map[string]string, 1) f._bucketType = make(map[string]string, 1)
for i := range response.Buckets {
for ri := range responses { bucket := &response.Buckets[i]
response := &responses[ri] bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
for i := range response.Buckets { f.cache.MarkOK(bucket.Name)
bucket := &response.Buckets[i] f._bucketID[bucket.Name] = bucket.ID
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name) f._bucketType[bucket.Name] = bucket.Type
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
}
} }
f.bucketTypeMutex.Unlock() f.bucketTypeMutex.Unlock()
f.bucketIDMutex.Unlock() f.bucketIDMutex.Unlock()
for ri := range responses { for i := range response.Buckets {
response := &responses[ri] bucket := &response.Buckets[i]
for i := range response.Buckets { err = fn(bucket)
bucket := &response.Buckets[i] if err != nil {
err := fn(bucket) return err
if err != nil {
return err
}
} }
} }
return nil return nil
@@ -1642,7 +1606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
bucket, bucketPath := f.split(remote) bucket, bucketPath := f.split(remote)
var RootURL string var RootURL string
if f.opt.DownloadURL == "" { if f.opt.DownloadURL == "" {
RootURL = f.info.APIs.Storage.DownloadURL RootURL = f.info.DownloadURL
} else { } else {
RootURL = f.opt.DownloadURL RootURL = f.opt.DownloadURL
} }
@@ -1993,7 +1957,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
// Use downloadUrl from backblaze if downloadUrl is not set // Use downloadUrl from backblaze if downloadUrl is not set
// otherwise use the custom downloadUrl // otherwise use the custom downloadUrl
if o.fs.opt.DownloadURL == "" { if o.fs.opt.DownloadURL == "" {
opts.RootURL = o.fs.info.APIs.Storage.DownloadURL opts.RootURL = o.fs.info.DownloadURL
} else { } else {
opts.RootURL = o.fs.opt.DownloadURL opts.RootURL = o.fs.opt.DownloadURL
} }

View File

@@ -87,11 +87,13 @@ func init() {
Description: "Box", Description: "Box",
NewFs: NewFs, NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token") boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error var err error
// If using box config.json, use JWT auth // If using box config.json, use JWT auth
if usesJWTAuth(m) { if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, name, m) err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err) return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
} }
@@ -112,11 +114,6 @@ func init() {
}, { }, {
Name: "box_config_file", Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp, Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "config_credentials",
Help: "Box App config.json contents.\n\nLeave blank normally.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, { }, {
Name: "access_token", Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.", Help: "Box App Primary Access Token\n\nLeave blank normally.",
@@ -187,17 +184,9 @@ See: https://developer.box.com/guides/authentication/jwt/as-user/
}) })
} }
func usesJWTAuth(m configmap.Mapper) bool { func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile, okFile := m.Get("box_config_file") jsonFile = env.ShellExpand(jsonFile)
jsonFileCredentials, okCredentials := m.Get("config_credentials") boxConfig, err := getBoxConfig(jsonFile)
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != ""
}
func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
boxSubType, _ := m.Get("box_sub_type")
boxConfig, err := getBoxConfig(m)
if err != nil { if err != nil {
return fmt.Errorf("get box config: %w", err) return fmt.Errorf("get box config: %w", err)
} }
@@ -216,19 +205,12 @@ func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error
return err return err
} }
func getBoxConfig(m configmap.Mapper) (boxConfig *api.ConfigJSON, err error) { func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
configFileCredentials, _ := m.Get("config_credentials") file, err := os.ReadFile(configFile)
configFileBytes := []byte(configFileCredentials) if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
if configFileCredentials == "" {
configFile, _ := m.Get("box_config_file")
configFileBytes, err = os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
} }
err = json.Unmarshal(file, &boxConfig)
err = json.Unmarshal(configFileBytes, &boxConfig)
if err != nil { if err != nil {
return nil, fmt.Errorf("box: failed to parse Box config: %w", err) return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
} }
@@ -503,12 +485,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("as-user", f.opt.Impersonate) f.srv.SetHeader("as-user", f.opt.Impersonate)
} }
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
if ts != nil { if ts != nil {
// If using box config.json and JWT, renewing should just refresh the token and // If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not. // should do so whether there are uploads pending or not.
if usesJWTAuth(m) { if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(ctx, name, m) err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
return err return err
}) })
f.tokenRenewer.Start() f.tokenRenewer.Start()

View File

@@ -403,14 +403,14 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" { if ciphertext == "" {
return "", nil return "", nil
} }
before, after, ok := strings.Cut(ciphertext, ".") pos := strings.Index(ciphertext, ".")
if !ok { if pos == -1 {
return "", ErrorNotAnEncryptedFile return "", ErrorNotAnEncryptedFile
} // No . } // No .
num := before num := ciphertext[:pos]
if num == "!" { if num == "!" {
// No rotation; probably original was not valid unicode // No rotation; probably original was not valid unicode
return after, nil return ciphertext[pos+1:], nil
} }
dir, err := strconv.Atoi(num) dir, err := strconv.Atoi(num)
if err != nil { if err != nil {
@@ -425,7 +425,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
var result bytes.Buffer var result bytes.Buffer
inQuote := false inQuote := false
for _, runeValue := range after { for _, runeValue := range ciphertext[pos+1:] {
switch { switch {
case inQuote: case inQuote:
_, _ = result.WriteRune(runeValue) _, _ = result.WriteRune(runeValue)

View File

@@ -1,370 +0,0 @@
// Package api has type definitions for drime
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"encoding/json"
"fmt"
"time"
)
// Types of things in Item
// Types of things in Item
const (
	// ItemTypeFolder is the Item.Type value used for directories.
	ItemTypeFolder = "folder"
)

// User describes an account with access to an entry, as embedded in
// Item.Users.
type User struct {
	Email            string      `json:"email"`
	ID               json.Number `json:"id"`
	Avatar           string      `json:"avatar"`
	ModelType        string      `json:"model_type"`
	OwnsEntry        bool        `json:"owns_entry"` // true when this user owns the entry
	EntryPermissions []any       `json:"entry_permissions"`
	DisplayName      string      `json:"display_name"`
}

// Permissions lists the file operations the current token may perform
// on an entry.
type Permissions struct {
	FilesUpdate   bool `json:"files.update"`
	FilesCreate   bool `json:"files.create"`
	FilesDownload bool `json:"files.download"`
	FilesDelete   bool `json:"files.delete"`
}
// Item describes a folder or a file as returned by /drive/file-entries
type Item struct {
	ID          json.Number `json:"id"`
	Name        string      `json:"name"`
	Description any         `json:"description"`
	FileName    string      `json:"file_name"`
	Mime        string      `json:"mime"`
	Color       any         `json:"color"`
	Backup      bool        `json:"backup"`
	Tracked     int         `json:"tracked"`
	FileSize    int64       `json:"file_size"`
	UserID      json.Number `json:"user_id"`
	ParentID    json.Number `json:"parent_id"`
	CreatedAt   time.Time   `json:"created_at"`
	UpdatedAt   time.Time   `json:"updated_at"`
	DeletedAt   any         `json:"deleted_at"`
	IsDeleted   int         `json:"is_deleted"`
	Path        string      `json:"path"`
	DiskPrefix  any         `json:"disk_prefix"`
	Type        string      `json:"type"` // e.g. ItemTypeFolder for directories
	Extension   any         `json:"extension"`
	FileHash    any         `json:"file_hash"`
	Public      bool        `json:"public"`
	Thumbnail   bool        `json:"thumbnail"`
	MuxStatus   any         `json:"mux_status"`
	ThumbnailURL any        `json:"thumbnail_url"`
	WorkspaceID int         `json:"workspace_id"`
	IsEncrypted int         `json:"is_encrypted"`
	Iv          any         `json:"iv"`
	VaultID     any         `json:"vault_id"`
	OwnerID     int         `json:"owner_id"`
	Hash        string      `json:"hash"`
	URL         string      `json:"url"`
	Users       []User      `json:"users"`       // accounts with access to this entry
	Tags        []any       `json:"tags"`
	Permissions Permissions `json:"permissions"` // operations the current token may perform
}
// Listing is one page of Items together with its pagination cursors.
type Listing struct {
	CurrentPage int    `json:"current_page"`
	Data        []Item `json:"data"` // the entries on this page
	From        int    `json:"from"`
	LastPage    int    `json:"last_page"`
	NextPage    int    `json:"next_page"`
	PerPage     int    `json:"per_page"`
	PrevPage    int    `json:"prev_page"`
	To          int    `json:"to"`
	Total       int    `json:"total"`
}

// UploadResponse is the reply to a file upload, carrying the newly
// created entry in FileEntry.
type UploadResponse struct {
	Status    string `json:"status"`
	FileEntry Item   `json:"fileEntry"`
}

// CreateFolderRequest is the body of a folder-creation call.
type CreateFolderRequest struct {
	Name     string      `json:"name"`
	ParentID json.Number `json:"parentId,omitempty"` // omitted when creating in the root
}

// CreateFolderResponse is the reply to a folder-creation call.
type CreateFolderResponse struct {
	Status string `json:"status"`
	Folder Item   `json:"folder"`
}
const (
	// timeFormat is RFC3339 wrapped in double quotes so it can be used
	// directly as a JSON string value, e.g. "2017-05-03T07:26:10-07:00".
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// drime API, serialized as an RFC3339 JSON string.
type Time time.Time

// MarshalJSON turns a Time into JSON.
//
// The time is formatted in its own location — no conversion to UTC is
// performed — so the output round-trips exactly with UnmarshalJSON.
func (t *Time) MarshalJSON() ([]byte, error) {
	// timeFormat already includes the surrounding quotes.
	return []byte((*time.Time)(t).Format(timeFormat)), nil
}

// UnmarshalJSON turns JSON into a Time.
//
// It returns an error if data is not a quoted RFC3339 timestamp.
func (t *Time) UnmarshalJSON(data []byte) error {
	parsed, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(parsed)
	return nil
}
// Error is the error envelope returned by the drime API: any Status
// other than "ok" denotes a failure.
type Error struct {
	Status string `json:"status"`
}

// Error implements the error interface, quoting the API status.
func (e Error) Error() string {
	return fmt.Sprintf("Error %q", e.Status)
}

// IsError reports whether the response status denotes a failure.
func (e Error) IsError() bool {
	return e.Status != "ok"
}

// Err folds a transport error and the API status into one error: it
// prefers err when non-nil, then e itself when its status is not "ok",
// otherwise nil.
func (e Error) Err(err error) error {
	switch {
	case err != nil:
		return err
	case e.IsError():
		return e
	default:
		return nil
	}
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// XXXItem describes a folder or a file as returned by /contents.
//
// NOTE(review): this looks like leftover scaffolding from another
// backend (note the XXX prefix and the commented-out fields); it is not
// referenced by the drime-specific types above — confirm before use.
type XXXItem struct {
	ID            string                 `json:"id"`
	ParentFolder  string                 `json:"parentFolder"`
	Type          string                 `json:"type"`
	Name          string                 `json:"name"`
	Size          int64                  `json:"size"`
	Code          string                 `json:"code"`
	CreateTime    int64                  `json:"createTime"` // native Unix-seconds, see FromNativeTime
	ModTime       int64                  `json:"modTime"`    // native Unix-seconds, see FromNativeTime
	Link          string                 `json:"link"`
	MD5           string                 `json:"md5"`
	MimeType      string                 `json:"mimetype"`
	ChildrenCount int                    `json:"childrenCount"`
	DirectLinks   map[string]*DirectLink `json:"directLinks"`
	//Public bool `json:"public"`
	//ServerSelected string `json:"serverSelected"`
	//Thumbnail string `json:"thumbnail"`
	//DownloadCount int `json:"downloadCount"`
	//TotalDownloadCount int64 `json:"totalDownloadCount"`
	//TotalSize int64 `json:"totalSize"`
	//ChildrenIDs []string `json:"childrenIds"`
	Children map[string]*Item `json:"children"`
}
// ToNativeTime converts a go time to a native time
func ToNativeTime(t time.Time) int64 {
return t.Unix()
}
// FromNativeTime converts native time to a go time
func FromNativeTime(t int64) time.Time {
return time.Unix(t, 0)
}
// DirectLink describes a direct link to a file so it can be
// downloaded by third parties.
type DirectLink struct {
	ExpireTime       int64  `json:"expireTime"` // native Unix-seconds expiry
	SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
	DomainsAllowed   []any  `json:"domainsAllowed"`
	Auth             []any  `json:"auth"`
	IsReqLink        bool   `json:"isReqLink"`
	DirectLink       string `json:"directLink"` // the downloadable URL
}

// Contents is returned from the /contents call.
// It embeds Error so the standard Err/IsError checks apply directly.
type Contents struct {
	Error
	Data struct {
		Item
	} `json:"data"`
	Metadata Metadata `json:"metadata"`
}

// Metadata is returned when paging is in use
type Metadata struct {
	TotalCount  int  `json:"totalCount"`
	TotalPages  int  `json:"totalPages"`
	Page        int  `json:"page"`
	PageSize    int  `json:"pageSize"`
	HasNextPage bool `json:"hasNextPage"` // true when another page can be fetched
}
// AccountsGetID is the result of /accounts/getid
type AccountsGetID struct {
	Error
	Data struct {
		ID string `json:"id"` // the account identifier
	} `json:"data"`
}

// Stats of storage and traffic
type Stats struct {
	FolderCount            int64 `json:"folderCount"`
	FileCount              int64 `json:"fileCount"`
	Storage                int64 `json:"storage"`
	TrafficDirectGenerated int64 `json:"trafficDirectGenerated"`
	TrafficReqDownloaded   int64 `json:"trafficReqDownloaded"`
	TrafficWebDownloaded   int64 `json:"trafficWebDownloaded"`
}

// AccountsGet is the result of /accounts/{id}
type AccountsGet struct {
	Error
	Data struct {
		ID                             string `json:"id"`
		Email                          string `json:"email"`
		Tier                           string `json:"tier"`
		PremiumType                    string `json:"premiumType"`
		Token                          string `json:"token"`
		RootFolder                     string `json:"rootFolder"` // ID of the account's root folder
		SubscriptionProvider           string `json:"subscriptionProvider"`
		SubscriptionEndDate            int    `json:"subscriptionEndDate"`
		SubscriptionLimitDirectTraffic int64  `json:"subscriptionLimitDirectTraffic"`
		SubscriptionLimitStorage       int64  `json:"subscriptionLimitStorage"`
		StatsCurrent                   Stats  `json:"statsCurrent"`
		// StatsHistory map[int]map[int]map[int]Stats `json:"statsHistory"`
	} `json:"data"`
}
// CreateFolderRequest is the input to /contents/createFolder
// type CreateFolderRequest struct {
// ParentFolderID string `json:"parentFolderId"`
// FolderName string `json:"folderName"`
// ModTime int64 `json:"modTime,omitempty"`
// }
// CreateFolderResponse is the output from /contents/createFolder
// type CreateFolderResponse struct {
// Error
// Data Item `json:"data"`
// }
// DeleteRequest is the input to DELETE /contents
type DeleteRequest struct {
	EntryIds      []string `json:"entryIds"`      // IDs of the entries to delete
	DeleteForever bool     `json:"deleteForever"` // when true, delete permanently
}

// DeleteResponse is the output from DELETE /contents, with a per-entry
// status (presumably keyed by entry ID — verify against the API).
type DeleteResponse struct {
	Error
	Data map[string]Error
}
// DirectUploadURL returns the fixed endpoint used for direct file
// uploads to Drime.
func DirectUploadURL() string {
	const uploadEndpoint = "https://upload.drime.io/uploadfile"
	return uploadEndpoint
}
// UploadResponse is returned by POST /contents/uploadfile
// type UploadResponse struct {
// Error
// Data Item `json:"data"`
// }
// DirectLinksRequest specifies the parameters for creating a direct
// link; all restrictions are optional.
type DirectLinksRequest struct {
	ExpireTime       int64 `json:"expireTime,omitempty"` // native Unix-seconds expiry
	SourceIpsAllowed []any `json:"sourceIpsAllowed,omitempty"`
	DomainsAllowed   []any `json:"domainsAllowed,omitempty"`
	Auth             []any `json:"auth,omitempty"`
}

// DirectLinksResult is returned from POST /contents/{id}/directlinks
type DirectLinksResult struct {
	Error
	Data struct {
		ExpireTime       int64  `json:"expireTime"`
		SourceIpsAllowed []any  `json:"sourceIpsAllowed"`
		DomainsAllowed   []any  `json:"domainsAllowed"`
		Auth             []any  `json:"auth"`
		IsReqLink        bool   `json:"isReqLink"`
		ID               string `json:"id"`
		DirectLink       string `json:"directLink"` // the downloadable URL
	} `json:"data"`
}
// UpdateItemRequest describes the updates to be done to an item for PUT /contents/{id}/update
//
// Value depends on Attribute:
// For Attribute "name": the name of the content (file or folder)
// For Attribute "description": the description displayed on the download page (folder only)
// For Attribute "tags": a comma-separated list of tags (folder only)
// For Attribute "public": either true or false (folder only)
// For Attribute "expiry": a Unix timestamp of the expiration date (folder only)
// For Attribute "password": the password to set (folder only)
type UpdateItemRequest struct {
	Attribute string `json:"attribute"`
	Value     any    `json:"attributeValue"`
}

// UpdateItemResponse is returned by PUT /contents/{id}/update
type UpdateItemResponse struct {
	Error
	Data Item `json:"data"`
}
// MoveRequest is the input to /contents/move
type MoveRequest struct {
	FolderID   string `json:"folderId"`   // destination folder ID
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// MoveResponse is returned by POST /contents/move, with a per-entry
// result carrying both an Error and the moved Item.
type MoveResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// CopyRequest is the input to /contents/copy
type CopyRequest struct {
	FolderID   string `json:"folderId"`   // destination folder ID
	ContentsID string `json:"contentsId"` // comma separated list of IDs
}

// CopyResponse is returned by POST /contents/copy, with a per-entry
// result carrying both an Error and the copied Item.
type CopyResponse struct {
	Error
	Data map[string]struct {
		Error
		Item `json:"data"`
	} `json:"data"`
}

// UploadServerStatus is returned when fetching the root of an upload server
type UploadServerStatus struct {
	Error
	Data struct {
		Server string `json:"server"`
		Test   string `json:"test"`
	} `json:"data"`
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,17 +0,0 @@
// Test Drime filesystem interface
package drime_test
import (
"testing"
"github.com/rclone/rclone/backend/drime"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs the standard rclone integration test suite
// against the remote named "TestDrime:" in the rclone config.
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDrime:",
		NilObject:  (*drime.Object)(nil), // lets the suite type-check nil Object returns
	})
}

View File

@@ -346,26 +346,9 @@ can't check the size and hash but the file contents will be decompressed.
Advanced: true, Advanced: true,
Default: false, Default: false,
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: `Custom endpoint for the storage API. Leave blank to use the provider default. Help: "Endpoint for the service.\n\nLeave blank normally.",
When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint),
the subpath will be ignored during upload operations due to a limitation in the
underlying Google API Go client library.
Download and listing operations will work correctly with the full endpoint path.
If you require subpath support for uploads, avoid using subpaths in your custom
endpoint configuration.`,
Advanced: true, Advanced: true,
Examples: []fs.OptionExample{{
Value: "storage.example.org",
Help: "Specify a custom endpoint",
}, {
Value: "storage.example.org:4443",
Help: "Specifying a custom endpoint with port",
}, {
Value: "storage.example.org:4443/gcs/api",
Help: "Specifying a subpath, see the note, uploads won't use the custom path!",
}},
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,

View File

@@ -30,11 +30,9 @@ import (
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4" v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awsconfig "github.com/aws/aws-sdk-go-v2/config" awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/smithy-go" "github.com/aws/smithy-go"
"github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/middleware"
@@ -327,30 +325,6 @@ If empty it will default to the environment variable "AWS_PROFILE" or
Help: "An AWS session token.", Help: "An AWS session token.",
Advanced: true, Advanced: true,
Sensitive: true, Sensitive: true,
}, {
Name: "role_arn",
Help: `ARN of the IAM role to assume.
Leave blank if not using assume role.`,
Advanced: true,
}, {
Name: "role_session_name",
Help: `Session name for assumed role.
If empty, a session name will be generated automatically.`,
Advanced: true,
}, {
Name: "role_session_duration",
Help: `Session duration for assumed role.
If empty, the default session duration will be used.`,
Advanced: true,
}, {
Name: "role_external_id",
Help: `External ID for assumed role.
Leave blank if not using an external ID.`,
Advanced: true,
}, { }, {
Name: "upload_concurrency", Name: "upload_concurrency",
Help: `Concurrency for multipart uploads and copies. Help: `Concurrency for multipart uploads and copies.
@@ -953,10 +927,6 @@ type Options struct {
SharedCredentialsFile string `config:"shared_credentials_file"` SharedCredentialsFile string `config:"shared_credentials_file"`
Profile string `config:"profile"` Profile string `config:"profile"`
SessionToken string `config:"session_token"` SessionToken string `config:"session_token"`
RoleARN string `config:"role_arn"`
RoleSessionName string `config:"role_session_name"`
RoleSessionDuration fs.Duration `config:"role_session_duration"`
RoleExternalID string `config:"role_external_id"`
UploadConcurrency int `config:"upload_concurrency"` UploadConcurrency int `config:"upload_concurrency"`
ForcePathStyle bool `config:"force_path_style"` ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"` V2Auth bool `config:"v2_auth"`
@@ -1320,34 +1290,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
opt.Region = "us-east-1" opt.Region = "us-east-1"
} }
// Handle assume role if RoleARN is specified
if opt.RoleARN != "" {
fs.Debugf(nil, "Using assume role with ARN: %s", opt.RoleARN)
// Set region for the config before creating STS client
awsConfig.Region = opt.Region
// Create STS client using the base credentials
stsClient := sts.NewFromConfig(awsConfig)
// Configure AssumeRole options
assumeRoleOptions := func(aro *stscreds.AssumeRoleOptions) {
// Set session name if provided, otherwise use a default
if opt.RoleSessionName != "" {
aro.RoleSessionName = opt.RoleSessionName
}
if opt.RoleSessionDuration != 0 {
aro.Duration = time.Duration(opt.RoleSessionDuration)
}
if opt.RoleExternalID != "" {
aro.ExternalID = &opt.RoleExternalID
}
}
// Create AssumeRole credentials provider
awsConfig.Credentials = stscreds.NewAssumeRoleProvider(stsClient, opt.RoleARN, assumeRoleOptions)
}
provider = loadProvider(opt.Provider) provider = loadProvider(opt.Provider)
if provider == nil { if provider == nil {
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider) fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
@@ -1706,14 +1648,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
newRoot, leaf := path.Split(oldRoot) newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot) f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf) _, err := f.NewObject(ctx, leaf)
if errors.Is(err, fs.ErrorObjectNotFound) { if err != nil {
// File doesn't exist or is a directory so return old f // File doesn't exist or is a directory so return old f
f.setRoot(oldRoot) f.setRoot(oldRoot)
return f, nil return f, nil
} }
if err != nil {
return nil, err
}
// return an error with an fs which points to the parent // return an error with an fs which points to the parent
return f, fs.ErrorIsFile return f, fs.ErrorIsFile
} }
@@ -2893,8 +2832,6 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
SSECustomerKey: req.SSECustomerKey, SSECustomerKey: req.SSECustomerKey,
SSECustomerKeyMD5: req.SSECustomerKeyMD5, SSECustomerKeyMD5: req.SSECustomerKeyMD5,
UploadId: uid, UploadId: uid,
IfMatch: copyReq.IfMatch,
IfNoneMatch: copyReq.IfNoneMatch,
}) })
return f.shouldRetry(ctx, err) return f.shouldRetry(ctx, err)
}) })
@@ -2929,20 +2866,13 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
MetadataDirective: types.MetadataDirectiveCopy, MetadataDirective: types.MetadataDirectiveCopy,
} }
// Build upload options including headers and metadata // Update the metadata if it is in use
ci := fs.GetConfig(ctx) if ci := fs.GetConfig(ctx); ci.Metadata {
uploadOptions := fs.MetadataAsOpenOptions(ctx) ui, err := srcObj.prepareUpload(ctx, src, fs.MetadataAsOpenOptions(ctx), true)
for _, option := range ci.UploadHeaders { if err != nil {
uploadOptions = append(uploadOptions, option) return nil, fmt.Errorf("failed to prepare upload: %w", err)
} }
setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req)
ui, err := srcObj.prepareUpload(ctx, src, uploadOptions, true)
if err != nil {
return nil, fmt.Errorf("failed to prepare upload: %w", err)
}
setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req)
if ci.Metadata {
req.MetadataDirective = types.MetadataDirectiveReplace req.MetadataDirective = types.MetadataDirectiveReplace
} }
@@ -4351,8 +4281,6 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey, SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5, SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
UploadId: w.uploadID, UploadId: w.uploadID,
IfMatch: w.ui.req.IfMatch,
IfNoneMatch: w.ui.req.IfNoneMatch,
}) })
return w.f.shouldRetry(ctx, err) return w.f.shouldRetry(ctx, err)
}) })

View File

@@ -70,7 +70,6 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV
// setFrom_typesObject_typesObjectVersion copies matching elements from a to b // setFrom_typesObject_typesObjectVersion copies matching elements from a to b
func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) { func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) {
a.ChecksumAlgorithm = b.ChecksumAlgorithm a.ChecksumAlgorithm = b.ChecksumAlgorithm
a.ChecksumType = b.ChecksumType
a.ETag = b.ETag a.ETag = b.ETag
a.Key = b.Key a.Key = b.Key
a.LastModified = b.LastModified a.LastModified = b.LastModified
@@ -83,7 +82,6 @@ func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVers
func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipartUploadInput, b *s3.HeadObjectOutput) { func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipartUploadInput, b *s3.HeadObjectOutput) {
a.BucketKeyEnabled = b.BucketKeyEnabled a.BucketKeyEnabled = b.BucketKeyEnabled
a.CacheControl = b.CacheControl a.CacheControl = b.CacheControl
a.ChecksumType = b.ChecksumType
a.ContentDisposition = b.ContentDisposition a.ContentDisposition = b.ContentDisposition
a.ContentEncoding = b.ContentEncoding a.ContentEncoding = b.ContentEncoding
a.ContentLanguage = b.ContentLanguage a.ContentLanguage = b.ContentLanguage
@@ -162,15 +160,12 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
a.CacheControl = b.CacheControl a.CacheControl = b.CacheControl
a.ChecksumCRC32 = b.ChecksumCRC32 a.ChecksumCRC32 = b.ChecksumCRC32
a.ChecksumCRC32C = b.ChecksumCRC32C a.ChecksumCRC32C = b.ChecksumCRC32C
a.ChecksumCRC64NVME = b.ChecksumCRC64NVME
a.ChecksumSHA1 = b.ChecksumSHA1 a.ChecksumSHA1 = b.ChecksumSHA1
a.ChecksumSHA256 = b.ChecksumSHA256 a.ChecksumSHA256 = b.ChecksumSHA256
a.ChecksumType = b.ChecksumType
a.ContentDisposition = b.ContentDisposition a.ContentDisposition = b.ContentDisposition
a.ContentEncoding = b.ContentEncoding a.ContentEncoding = b.ContentEncoding
a.ContentLanguage = b.ContentLanguage a.ContentLanguage = b.ContentLanguage
a.ContentLength = b.ContentLength a.ContentLength = b.ContentLength
a.ContentRange = b.ContentRange
a.ContentType = b.ContentType a.ContentType = b.ContentType
a.DeleteMarker = b.DeleteMarker a.DeleteMarker = b.DeleteMarker
a.ETag = b.ETag a.ETag = b.ETag
@@ -192,7 +187,6 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
a.SSEKMSKeyId = b.SSEKMSKeyId a.SSEKMSKeyId = b.SSEKMSKeyId
a.ServerSideEncryption = b.ServerSideEncryption a.ServerSideEncryption = b.ServerSideEncryption
a.StorageClass = b.StorageClass a.StorageClass = b.StorageClass
a.TagCount = b.TagCount
a.VersionId = b.VersionId a.VersionId = b.VersionId
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
a.ResultMetadata = b.ResultMetadata a.ResultMetadata = b.ResultMetadata
@@ -238,7 +232,6 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
a.CacheControl = b.CacheControl a.CacheControl = b.CacheControl
a.ChecksumCRC32 = b.ChecksumCRC32 a.ChecksumCRC32 = b.ChecksumCRC32
a.ChecksumCRC32C = b.ChecksumCRC32C a.ChecksumCRC32C = b.ChecksumCRC32C
a.ChecksumCRC64NVME = b.ChecksumCRC64NVME
a.ChecksumSHA1 = b.ChecksumSHA1 a.ChecksumSHA1 = b.ChecksumSHA1
a.ChecksumSHA256 = b.ChecksumSHA256 a.ChecksumSHA256 = b.ChecksumSHA256
a.ContentDisposition = b.ContentDisposition a.ContentDisposition = b.ContentDisposition
@@ -277,8 +270,6 @@ func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.Put
a.GrantRead = b.GrantRead a.GrantRead = b.GrantRead
a.GrantReadACP = b.GrantReadACP a.GrantReadACP = b.GrantReadACP
a.GrantWriteACP = b.GrantWriteACP a.GrantWriteACP = b.GrantWriteACP
a.IfMatch = b.IfMatch
a.IfNoneMatch = b.IfNoneMatch
a.Metadata = b.Metadata a.Metadata = b.Metadata
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
a.ObjectLockMode = b.ObjectLockMode a.ObjectLockMode = b.ObjectLockMode

View File

@@ -389,8 +389,8 @@ func parseHash(str string) (string, string, error) {
if str == "-" { if str == "-" {
return "", "", nil return "", "", nil
} }
if before, after, ok := strings.Cut(str, ":"); ok { if pos := strings.Index(str, ":"); pos > 0 {
name, val := before, after name, val := str[:pos], str[pos+1:]
if name != "" && val != "" { if name != "" && val != "" {
return name, val, nil return name, val, nil
} }

View File

@@ -58,10 +58,10 @@ type conn struct {
// interoperate with the rclone sftp backend // interoperate with the rclone sftp backend
func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) { func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) {
binary, args := command, "" binary, args := command, ""
before, after, ok := strings.Cut(command, " ") space := strings.Index(command, " ")
if ok { if space >= 0 {
binary = before binary = command[:space]
args = strings.TrimLeft(after, " ") args = strings.TrimLeft(command[space+1:], " ")
} }
args = shellUnEscape(args) args = shellUnEscape(args)
fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args) fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)

View File

@@ -45,10 +45,6 @@ var OptionsInfo = fs.Options{{
Name: "disable_dir_list", Name: "disable_dir_list",
Default: false, Default: false,
Help: "Disable HTML directory list on GET request for a directory", Help: "Disable HTML directory list on GET request for a directory",
}, {
Name: "disable_zip",
Default: false,
Help: "Disable zip download of directories",
}}. }}.
Add(libhttp.ConfigInfo). Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo). Add(libhttp.AuthConfigInfo).
@@ -61,7 +57,6 @@ type Options struct {
Template libhttp.TemplateConfig Template libhttp.TemplateConfig
EtagHash string `config:"etag_hash"` EtagHash string `config:"etag_hash"`
DisableDirList bool `config:"disable_dir_list"` DisableDirList bool `config:"disable_dir_list"`
DisableZip bool `config:"disable_zip"`
} }
// Opt is options set by command line flags // Opt is options set by command line flags
@@ -413,24 +408,6 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
return return
} }
dir := node.(*vfs.Dir) dir := node.(*vfs.Dir)
if r.URL.Query().Get("download") == "zip" && !w.opt.DisableZip {
fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr)
zipName := path.Base(dirRemote)
if dirRemote == "" {
zipName = "root"
}
rw.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"")
rw.Header().Set("Content-Type", "application/zip")
rw.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
err := vfs.CreateZip(ctx, dir, rw)
if err != nil {
serve.Error(ctx, dirRemote, rw, "Failed to create zip", err)
return
}
return
}
dirEntries, err := dir.ReadDirAll() dirEntries, err := dir.ReadDirAll()
if err != nil { if err != nil {
@@ -440,7 +417,6 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
// Make the entries for display // Make the entries for display
directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate()) directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate())
directory.DisableZip = w.opt.DisableZip
for _, node := range dirEntries { for _, node := range dirEntries {
if vfscommon.Opt.NoModTime { if vfscommon.Opt.NoModTime {
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{}) directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})

View File

@@ -56,22 +56,22 @@ var speedCmd = &cobra.Command{
Short: `Run a speed test to the remote`, Short: `Run a speed test to the remote`,
Long: `Run a speed test to the remote. Long: `Run a speed test to the remote.
This command runs a series of uploads and downloads to the remote, measuring This command runs a series of uploads and downloads to the remote, measuring
and printing the speed of each test using varying file sizes and numbers of and printing the speed of each test using varying file sizes and numbers of
files. files.
Test time can be innaccurate with small file caps and large files. As it Test time can be innaccurate with small file caps and large files. As it
uses the results of an initial test to determine how many files to use in uses the results of an initial test to determine how many files to use in
each subsequent test. each subsequent test.
It is recommended to use -q flag for a simpler output. e.g.: It is recommended to use -q flag for a simpler output. e.g.:
    rclone test speed remote: -q
rclone test speed remote: -q **NB** This command will create and delete files on the remote in a randomly
named directory which should be tidied up after.
**NB** This command will create and delete files on the remote in a randomly You can use the --json flag to only print the results in JSON format.`,
named directory which will be automatically removed on a clean exit.
You can use the --json flag to only print the results in JSON format.`,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.72", "versionIntroduced": "v1.72",
}, },

View File

@@ -237,6 +237,7 @@ It would be possible to add ISO support fairly easily as the library we use ([go
It would be possible to add write support, but this would only be for creating new archives, not for updating existing archives. It would be possible to add write support, but this would only be for creating new archives, not for updating existing archives.
<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/archive/archive.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length --> <!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/archive/archive.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options ### Standard options
Here are the Standard options specific to archive (Read archives). Here are the Standard options specific to archive (Read archives).

View File

@@ -1045,8 +1045,3 @@ put them back in again. -->
- n4n5 <its.just.n4n5@gmail.com> - n4n5 <its.just.n4n5@gmail.com>
- aliaj1 <ali19961@gmail.com> - aliaj1 <ali19961@gmail.com>
- Sean Turner <30396892+seanturner026@users.noreply.github.com> - Sean Turner <30396892+seanturner026@users.noreply.github.com>
- jijamik <30904953+jijamik@users.noreply.github.com>
- Dominik Sander <git@dsander.de>
- Nikolay Kiryanov <nikolay@kiryanov.ru>
- Diana <5275194+DianaNites@users.noreply.github.com>
- Duncan Smart <duncan.smart@gmail.com>

View File

@@ -103,26 +103,6 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in
chunks only have an MD5 if the source remote was capable of MD5 chunks only have an MD5 if the source remote was capable of MD5
hashes, e.g. the local disk. hashes, e.g. the local disk.
### Metadata and tags
Rclone can map arbitrary metadata to Azure Blob headers, user metadata, and tags
when `--metadata` is enabled (or when using `--metadata-set` / `--metadata-mapper`).
- Headers: Set these keys in metadata to map to the corresponding blob headers:
- `cache-control`, `content-disposition`, `content-encoding`, `content-language`, `content-type`.
- User metadata: Any other non-reserved keys are written as user metadata
(keys are normalized to lowercase). Keys starting with `x-ms-` are reserved and
are not stored as user metadata.
- Tags: Provide `x-ms-tags` as a comma-separated list of `key=value` pairs, e.g.
`x-ms-tags=env=dev,team=sync`. These are applied as blob tags on upload and on
server-side copies. Whitespace around keys/values is ignored.
- Modtime override: Provide `mtime` in RFC3339/RFC3339Nano format to override the
stored modtime persisted in user metadata. If `mtime` cannot be parsed, rclone
logs a debug message and ignores the override.
Notes:
- Rclone ignores reserved `x-ms-*` keys (except `x-ms-tags`) for user metadata.
### Performance ### Performance
When uploading large files, increasing the value of When uploading large files, increasing the value of
@@ -979,13 +959,13 @@ Properties:
- Type: string - Type: string
- Required: false - Required: false
- Examples: - Examples:
- "" - ""
- The container and its blobs can be accessed only with an authorized request. - The container and its blobs can be accessed only with an authorized request.
- It's a default value. - It's a default value.
- "blob" - "blob"
- Blob data within this container can be read via anonymous request. - Blob data within this container can be read via anonymous request.
- "container" - "container"
- Allow full public read access for container and blob data. - Allow full public read access for container and blob data.
#### --azureblob-directory-markers #### --azureblob-directory-markers
@@ -1042,12 +1022,12 @@ Properties:
- Type: string - Type: string
- Required: false - Required: false
- Choices: - Choices:
- "" - ""
- By default, the delete operation fails if a blob has snapshots - By default, the delete operation fails if a blob has snapshots
- "include" - "include"
- Specify 'include' to remove the root blob and all its snapshots - Specify 'include' to remove the root blob and all its snapshots
- "only" - "only"
- Specify 'only' to remove only the snapshots but keep the root blob. - Specify 'only' to remove only the snapshots but keep the root blob.
#### --azureblob-description #### --azureblob-description

View File

@@ -283,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scena
All copy commands send the following 4 requests: All copy commands send the following 4 requests:
```text ```text
/b2api/v4/b2_authorize_account /b2api/v1/b2_authorize_account
/b2api/v1/b2_create_bucket /b2api/v1/b2_create_bucket
/b2api/v1/b2_list_buckets /b2api/v1/b2_list_buckets
/b2api/v1/b2_list_file_names /b2api/v1/b2_list_file_names
@@ -667,71 +667,6 @@ Properties:
- Type: Encoding - Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot - Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
#### --b2-sse-customer-algorithm
If using SSE-C, the server-side encryption algorithm used when storing this object in B2.
Properties:
- Config: sse_customer_algorithm
- Env Var: RCLONE_B2_SSE_CUSTOMER_ALGORITHM
- Type: string
- Required: false
- Examples:
- ""
- None
- "AES256"
- Advanced Encryption Standard (256 bits key length)
#### --b2-sse-customer-key
To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
Alternatively you can provide --sse-customer-key-base64.
Properties:
- Config: sse_customer_key
- Env Var: RCLONE_B2_SSE_CUSTOMER_KEY
- Type: string
- Required: false
- Examples:
- ""
- None
#### --b2-sse-customer-key-base64
To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
Alternatively you can provide --sse-customer-key.
Properties:
- Config: sse_customer_key_base64
- Env Var: RCLONE_B2_SSE_CUSTOMER_KEY_BASE64
- Type: string
- Required: false
- Examples:
- ""
- None
#### --b2-sse-customer-key-md5
If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
If you leave it blank, this is calculated automatically from the sse_customer_key provided.
Properties:
- Config: sse_customer_key_md5
- Env Var: RCLONE_B2_SSE_CUSTOMER_KEY_MD5
- Type: string
- Required: false
- Examples:
- ""
- None
#### --b2-description #### --b2-description
Description of the remote. Description of the remote.
@@ -747,11 +682,9 @@ Properties:
Here are the commands specific to the b2 backend. Here are the commands specific to the b2 backend.
Run them with: Run them with
```console rclone backend COMMAND remote:
rclone backend COMMAND remote:
```
The help below will explain what arguments each command takes. The help below will explain what arguments each command takes.
@@ -763,41 +696,35 @@ These can be run on a running backend using the rc command
### lifecycle ### lifecycle
Read or set the lifecycle for a bucket. Read or set the lifecycle for a bucket
```console rclone backend lifecycle remote: [options] [<arguments>+]
rclone backend lifecycle remote: [options] [<arguments>+]
```
This command can be used to read or set the lifecycle for a bucket. This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules: To show the current lifecycle rules:
```console rclone backend lifecycle b2:bucket
rclone backend lifecycle b2:bucket
```
This will dump something like this showing the lifecycle rules. This will dump something like this showing the lifecycle rules.
```json [
[ {
{ "daysFromHidingToDeleting": 1,
"daysFromHidingToDeleting": 1, "daysFromUploadingToHiding": null,
"daysFromUploadingToHiding": null, "daysFromStartingToCancelingUnfinishedLargeFiles": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null, "fileNamePrefix": ""
"fileNamePrefix": "" }
} ]
]
```
If there are no lifecycle rules (the default) then it will just return `[]`. If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules: To reset the current lifecycle rules:
```console rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30 rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
```
This will run and then print the new lifecycle rules as above. This will run and then print the new lifecycle rules as above.
@@ -809,27 +736,22 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config also which will mean deletions won't cause versions but the config also which will mean deletions won't cause versions but
overwrites will still cause versions to be made. overwrites will still cause versions to be made.
```console rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
``` See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
See: <https://www.backblaze.com/docs/cloud-storage-lifecycle-rules>
Options: Options:
- "daysFromHidingToDeleting": After a file has been hidden for this many days - "daysFromHidingToDeleting": After a file has been hidden for this many days it is deleted. 0 is off.
it is deleted. 0 is off. - "daysFromStartingToCancelingUnfinishedLargeFiles": Cancels any unfinished large file versions after this many days
- "daysFromStartingToCancelingUnfinishedLargeFiles": Cancels any unfinished - "daysFromUploadingToHiding": This many days after uploading a file is hidden
large file versions after this many days.
- "daysFromUploadingToHiding": This many days after uploading a file is hidden.
### cleanup ### cleanup
Remove unfinished large file uploads. Remove unfinished large file uploads.
```console rclone backend cleanup remote: [options] [<arguments>+]
rclone backend cleanup remote: [options] [<arguments>+]
```
This command removes unfinished large file uploads of age greater than This command removes unfinished large file uploads of age greater than
max-age, which defaults to 24 hours. max-age, which defaults to 24 hours.
@@ -837,33 +759,29 @@ max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what Note that you can use --interactive/-i or --dry-run with this command to see what
it would do. it would do.
```console rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup b2:bucket/path/to/object rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
```
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc. Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
Options: Options:
- "max-age": Max age of upload to delete. - "max-age": Max age of upload to delete
### cleanup-hidden ### cleanup-hidden
Remove old versions of files. Remove old versions of files.
```console rclone backend cleanup-hidden remote: [options] [<arguments>+]
rclone backend cleanup-hidden remote: [options] [<arguments>+]
```
This command removes any old hidden versions of files. This command removes any old hidden versions of files.
Note that you can use --interactive/-i or --dry-run with this command to see what Note that you can use --interactive/-i or --dry-run with this command to see what
it would do. it would do.
```console rclone backend cleanup-hidden b2:bucket/path/to/dir
rclone backend cleanup-hidden b2:bucket/path/to/dir
```
<!-- autogenerated options stop --> <!-- autogenerated options stop -->

View File

@@ -1047,16 +1047,20 @@ encodings.)
The following backends have known issues that need more investigation: The following backends have known issues that need more investigation:
<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs ---> <!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->
- `TestDropbox` (`dropbox`) - `TestGoFile` (`gofile`)
- [`TestBisyncRemoteRemote/normalization`](https://pub.rclone.org/integration-tests/current/dropbox-cmd.bisync-TestDropbox-1.txt) - [`TestBisyncRemoteLocal/all_changed`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
- Updated: 2025-11-21-010037 - [`TestBisyncRemoteLocal/backupdir`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
- [`TestBisyncRemoteLocal/basic`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
- [`TestBisyncRemoteLocal/changes`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
- [`TestBisyncRemoteLocal/check_access`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
- [78 more](https://pub.rclone.org/integration-tests/current/)
- Updated: 2025-08-21-010015
<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs ---> <!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->
The following backends either have not been tested recently or have known issues The following backends either have not been tested recently or have known issues
that are deemed unfixable for the time being: that are deemed unfixable for the time being:
<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs ---> <!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->
- `TestArchive` (`archive`)
- `TestCache` (`cache`) - `TestCache` (`cache`)
- `TestFileLu` (`filelu`) - `TestFileLu` (`filelu`)
- `TestFilesCom` (`filescom`) - `TestFilesCom` (`filescom`)

View File

@@ -323,19 +323,6 @@ Properties:
- Type: string - Type: string
- Required: false - Required: false
#### --box-config-credentials
Box App config.json contents.
Leave blank normally.
Properties:
- Config: config_credentials
- Env Var: RCLONE_BOX_CONFIG_CREDENTIALS
- Type: string
- Required: false
#### --box-access-token #### --box-access-token
Box App Primary Access Token Box App Primary Access Token
@@ -360,10 +347,10 @@ Properties:
- Type: string - Type: string
- Default: "user" - Default: "user"
- Examples: - Examples:
- "user" - "user"
- Rclone should act on behalf of a user. - Rclone should act on behalf of a user.
- "enterprise" - "enterprise"
- Rclone should act on behalf of a service account. - Rclone should act on behalf of a service account.
### Advanced options ### Advanced options

View File

@@ -394,12 +394,12 @@ Properties:
- Type: SizeSuffix - Type: SizeSuffix
- Default: 5Mi - Default: 5Mi
- Examples: - Examples:
- "1M" - "1M"
- 1 MiB - 1 MiB
- "5M" - "5M"
- 5 MiB - 5 MiB
- "10M" - "10M"
- 10 MiB - 10 MiB
#### --cache-info-age #### --cache-info-age
@@ -414,12 +414,12 @@ Properties:
- Type: Duration - Type: Duration
- Default: 6h0m0s - Default: 6h0m0s
- Examples: - Examples:
- "1h" - "1h"
- 1 hour - 1 hour
- "24h" - "24h"
- 24 hours - 24 hours
- "48h" - "48h"
- 48 hours - 48 hours
#### --cache-chunk-total-size #### --cache-chunk-total-size
@@ -435,12 +435,12 @@ Properties:
- Type: SizeSuffix - Type: SizeSuffix
- Default: 10Gi - Default: 10Gi
- Examples: - Examples:
- "500M" - "500M"
- 500 MiB - 500 MiB
- "1G" - "1G"
- 1 GiB - 1 GiB
- "10G" - "10G"
- 10 GiB - 10 GiB
### Advanced options ### Advanced options
@@ -698,11 +698,9 @@ Properties:
Here are the commands specific to the cache backend. Here are the commands specific to the cache backend.
Run them with: Run them with
```console rclone backend COMMAND remote:
rclone backend COMMAND remote:
```
The help below will explain what arguments each command takes. The help below will explain what arguments each command takes.
@@ -716,8 +714,6 @@ These can be run on a running backend using the rc command
Print stats on the cache backend in JSON format. Print stats on the cache backend in JSON format.
```console rclone backend stats remote: [options] [<arguments>+]
rclone backend stats remote: [options] [<arguments>+]
```
<!-- autogenerated options stop --> <!-- autogenerated options stop -->

View File

@@ -6,130 +6,6 @@ description: "Rclone Changelog"
# Changelog # Changelog
## v1.72.0 - 2025-11-21
[See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.72.0)
- New backends
- [Archive](/archive) backend to read archives on cloud storage. (Nick Craig-Wood)
- New S3 providers
- [Cubbit Object Storage](/s3/#Cubbit) (Marco Ferretti)
- [FileLu S5 Object Storage](/s3/#filelu-s5) (kingston125)
- [Hetzner Object Storage](/s3/#hetzner) (spiffytech)
- [Intercolo Object Storage](/s3/#intercolo) (Robin Rolf)
- [Rabata S3-compatible secure cloud storage](/s3/#Rabata) (dougal)
- [Servercore Object Storage](/s3/#servercore) (dougal)
- [SpectraLogic](/s3/#spectralogic) (dougal)
- New commands
- [rclone archive](/commands/rclone_archive/): command to create and read archive files (Fawzib Rojas)
- [rclone config string](/commands/rclone_config_string/): for making connection strings (Nick Craig-Wood)
- [rclone test speed](/commands/rclone_test_speed/): Add command to test a specified remotes speed (dougal)
- New Features
- backends: many backends have has a paged listing (`ListP`) interface added
- this enables progress when listing large directories and reduced memory usage
- build
- Bump golang.org/x/crypto from 0.43.0 to 0.45.0 to fix CVE-2025-58181 (dependabot[bot])
- Modernize code and tests (Nick Craig-Wood, russcoss, juejinyuxitu, reddaisyy, dulanting, Oleksandr Redko)
- Update all dependencies (Nick Craig-Wood)
- Enable support for `aix/ppc64` (Lakshmi-Surekha)
- check: Improved reporting of differences in sizes and contents (albertony)
- copyurl: Added `--url` to read URLs from CSV file (S-Pegg1, dougal)
- docs:
- markdown linting (albertony)
- fixes (albertony, Andrew Gunnerson, anon-pradip, Claudius Ellsel, dougal, iTrooz, Jean-Christophe Cura, Joseph Brownlee, kapitainsky, Matt LaPaglia, n4n5, Nick Craig-Wood, nielash, SublimePeace, Ted Robertson, vastonus)
- fs: remove unnecessary Seek call on log file (Aneesh Agrawal)
- hashsum: Improved output format when listing algorithms (albertony)
- lib/http: Cleanup indentation and other whitespace in http serve template (albertony)
- lsf: Add support for `unix` and `unixnano` time formats (Motte)
- oauthutil: Improved debug logs from token refresh (albertony)
- rc
- Add [job/batch](/rc/#job-batch) for sending batches of rc commands to run concurrently (Nick Craig-Wood)
- Add `runningIds` and `finishedIds` to [job/list](/rc/#job-list) (n4n5)
- Add `osVersion`, `osKernel` and `osArch` to [core/version](/rc/#core-version) (Nick Craig-Wood)
- Make sure fatal errors run via the rc don't crash rclone (Nick Craig-Wood)
- Add `executeId` to job statuses in [job/list](/rc/#job-list) (Nikolay Kiryanov)
- `config/unlock`: rename parameter to `configPassword` accept old as well (Nick Craig-Wood)
- serve http: Download folders as zip (dougal)
- Bug Fixes
- build
- Fix tls: failed to verify certificate: x509: negative serial number (Nick Craig-Wood)
- march
- Fix `--no-traverse` being very slow (Nick Craig-Wood)
- serve s3: Fix log output to remove the EXTRA messages (iTrooz)
- Mount
- Windows: improve error message on missing WinFSP (divinity76)
- Local
- Add `--skip-specials` to ignore special files (Adam Dinwoodie)
- Azure Blob
- Add ListP interface (dougal)
- Azurefiles
- Add ListP interface (Nick Craig-Wood)
- B2
- Add ListP interface (dougal)
- Add Server-Side encryption support (fries1234)
- Fix "expected a FileSseMode but found: ''" (dougal)
- Allow individual old versions to be deleted with `--b2-versions` (dougal)
- Box
- Add ListP interface (Nick Craig-Wood)
- Allow configuration with config file contents (Dominik Sander)
- Compress
- Add zstd compression (Alex)
- Drive
- Add ListP interface (Nick Craig-Wood)
- Dropbox
- Add ListP interface (Nick Craig-Wood)
- Fix error moving just created objects (Nick Craig-Wood)
- FTP
- Fix SOCKS proxy support (dougal)
- Fix transfers from servers that return 250 ok messages (jijamik)
- Google Cloud Storage
- Add ListP interface (dougal)
- Fix `--gcs-storage-class` to work with server side copy for objects (Riaz Arbi)
- HTTP
- Add basic metadata and provide it via serve (Oleg Kunitsyn)
- Jottacloud
- Add support for Let's Go Cloud (from MediaMarkt) as a whitelabel service (albertony)
- Add support for MediaMarkt Cloud as a whitelabel service (albertony)
- Added support for traditional oauth authentication also for the main service (albertony)
- Abort attempts to run unsupported rclone authorize command (albertony)
- Improved token refresh handling (albertony)
- Fix legacy authentication (albertony)
- Fix authentication for whitelabel services from Elkjøp subsidiaries (albertony)
- Mega
- Implement 2FA login (iTrooz)
- Memory
- Add ListP interface (dougal)
- Onedrive
- Add ListP interface (Nick Craig-Wood)
- Oracle Object Storage
- Add ListP interface (dougal)
- Pcloud
- Add ListP interface (Nick Craig-Wood)
- Proton Drive
- Automated 2FA login with OTP secret key (Microscotch)
- S3
- Make it easier to add new S3 providers (dougal)
- Add `--s3-use-data-integrity-protections` quirk to fix BadDigest error in Alibaba, Tencent (hunshcn)
- Add support for `--upload-header`, `If-Match` and `If-None-Match` (Sean Turner)
- Fix single file copying behavior with low permission (hunshcn)
- SFTP
- Fix zombie SSH processes with `--sftp-ssh` (Copilot)
- Smb
- Optimize smb mount performance by avoiding stat checks during initialization (Sudipto Baral)
- Swift
- Add ListP interface (dougal)
- If storage_policy isn't set, use the root containers policy (Andrew Ruthven)
- Report disk usage in segment containers (Andrew Ruthven)
- Ulozto
- Implement the About functionality (Lukas Krejci)
- Fix downloads returning HTML error page (aliaj1)
- WebDAV
- Optimize bearer token fetching with singleflight (hunshcn)
- Add ListP interface (Nick Craig-Wood)
- Use SpaceSepList to parse bearer token command (hunshcn)
- Add `Access-Control-Max-Age` header for CORS preflight caching (viocha)
- Fix out of memory with sharepoint-ntlm when uploading large file (Nick Craig-Wood)
## v1.71.2 - 2025-10-20 ## v1.71.2 - 2025-10-20
[See commits](https://github.com/rclone/rclone/compare/v1.71.1...v1.71.2) [See commits](https://github.com/rclone/rclone/compare/v1.71.1...v1.71.2)

View File

@@ -356,22 +356,22 @@ Properties:
- Type: string - Type: string
- Default: "md5" - Default: "md5"
- Examples: - Examples:
- "none" - "none"
- Pass any hash supported by wrapped remote for non-chunked files. - Pass any hash supported by wrapped remote for non-chunked files.
- Return nothing otherwise. - Return nothing otherwise.
- "md5" - "md5"
- MD5 for composite files. - MD5 for composite files.
- "sha1" - "sha1"
- SHA1 for composite files. - SHA1 for composite files.
- "md5all" - "md5all"
- MD5 for all files. - MD5 for all files.
- "sha1all" - "sha1all"
- SHA1 for all files. - SHA1 for all files.
- "md5quick" - "md5quick"
- Copying a file to chunker will request MD5 from the source. - Copying a file to chunker will request MD5 from the source.
- Falling back to SHA1 if unsupported. - Falling back to SHA1 if unsupported.
- "sha1quick" - "sha1quick"
- Similar to "md5quick" but prefers SHA1 over MD5. - Similar to "md5quick" but prefers SHA1 over MD5.
### Advanced options ### Advanced options
@@ -421,13 +421,13 @@ Properties:
- Type: string - Type: string
- Default: "simplejson" - Default: "simplejson"
- Examples: - Examples:
- "none" - "none"
- Do not use metadata files at all. - Do not use metadata files at all.
- Requires hash type "none". - Requires hash type "none".
- "simplejson" - "simplejson"
- Simple JSON supports hash sums and chunk validation. - Simple JSON supports hash sums and chunk validation.
- -
- It has the following fields: ver, size, nchunks, md5, sha1. - It has the following fields: ver, size, nchunks, md5, sha1.
#### --chunker-fail-hard #### --chunker-fail-hard
@@ -440,10 +440,10 @@ Properties:
- Type: bool - Type: bool
- Default: false - Default: false
- Examples: - Examples:
- "true" - "true"
- Report errors and abort current command. - Report errors and abort current command.
- "false" - "false"
- Warn user, skip incomplete file and proceed. - Warn user, skip incomplete file and proceed.
#### --chunker-transactions #### --chunker-transactions
@@ -456,19 +456,19 @@ Properties:
- Type: string - Type: string
- Default: "rename" - Default: "rename"
- Examples: - Examples:
- "rename" - "rename"
- Rename temporary files after a successful transaction. - Rename temporary files after a successful transaction.
- "norename" - "norename"
- Leave temporary file names and write transaction ID to metadata file. - Leave temporary file names and write transaction ID to metadata file.
- Metadata is required for no rename transactions (meta format cannot be "none"). - Metadata is required for no rename transactions (meta format cannot be "none").
- If you are using norename transactions you should be careful not to downgrade Rclone - If you are using norename transactions you should be careful not to downgrade Rclone
- as older versions of Rclone don't support this transaction style and will misinterpret - as older versions of Rclone don't support this transaction style and will misinterpret
- files manipulated by norename transactions. - files manipulated by norename transactions.
- This method is EXPERIMENTAL, don't use on production systems. - This method is EXPERIMENTAL, don't use on production systems.
- "auto" - "auto"
- Rename or norename will be used depending on capabilities of the backend. - Rename or norename will be used depending on capabilities of the backend.
- If meta format is set to "none", rename transactions will always be used. - If meta format is set to "none", rename transactions will always be used.
- This method is EXPERIMENTAL, don't use on production systems. - This method is EXPERIMENTAL, don't use on production systems.
#### --chunker-description #### --chunker-description

View File

@@ -15,6 +15,8 @@ mounting them, listing them in lots of different ways.
See the home page (https://rclone.org/) for installation, usage, See the home page (https://rclone.org/) for installation, usage,
documentation, changelog and configuration walkthroughs. documentation, changelog and configuration walkthroughs.
``` ```
rclone [flags] rclone [flags]
``` ```
@@ -24,8 +26,6 @@ rclone [flags]
``` ```
--alias-description string Description of the remote --alias-description string Description of the remote
--alias-remote string Remote or path to alias --alias-remote string Remote or path to alias
--archive-description string Description of the remote
--archive-remote string Remote to wrap to read archives from
--ask-password Allow prompt for password for encrypted configuration (default true) --ask-password Allow prompt for password for encrypted configuration (default true)
--auto-confirm If enabled, do not request console confirmation --auto-confirm If enabled, do not request console confirmation
--azureblob-access-tier string Access tier of blob: hot, cool, cold or archive --azureblob-access-tier string Access tier of blob: hot, cool, cold or archive
@@ -105,10 +105,6 @@ rclone [flags]
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files --b2-hard-delete Permanently delete files on remote removal, otherwise hide files
--b2-key string Application Key --b2-key string Application Key
--b2-lifecycle int Set the number of days deleted files should be kept when creating a bucket --b2-lifecycle int Set the number of days deleted files should be kept when creating a bucket
--b2-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in B2
--b2-sse-customer-key string To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
--b2-sse-customer-key-base64 string To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
--b2-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging --b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
--b2-upload-concurrency int Concurrency for multipart uploads (default 4) --b2-upload-concurrency int Concurrency for multipart uploads (default 4)
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) --b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
@@ -185,7 +181,7 @@ rclone [flags]
--combine-upstreams SpaceSepList Upstreams for combining --combine-upstreams SpaceSepList Upstreams for combining
--compare-dest stringArray Include additional server-side paths during comparison --compare-dest stringArray Include additional server-side paths during comparison
--compress-description string Description of the remote --compress-description string Description of the remote
--compress-level string GZIP (levels -2 to 9): --compress-level int GZIP compression level (-2 to 9) (default -1)
--compress-mode string Compression mode (default "gzip") --compress-mode string Compression mode (default "gzip")
--compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi) --compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
--compress-remote string Remote to compress --compress-remote string Remote to compress
@@ -553,7 +549,6 @@ rclone [flags]
--max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off) --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
--max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000) --max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
--max-transfer SizeSuffix Maximum size of data to transfer (default off) --max-transfer SizeSuffix Maximum size of data to transfer (default off)
--mega-2fa string The 2FA code of your MEGA account if the account is set up with one
--mega-debug Output more debug from Mega --mega-debug Output more debug from Mega
--mega-description string Description of the remote --mega-description string Description of the remote
--mega-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot) --mega-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
@@ -720,7 +715,6 @@ rclone [flags]
--protondrive-encoding Encoding The encoding for the backend (default Slash,LeftSpace,RightSpace,InvalidUtf8,Dot) --protondrive-encoding Encoding The encoding for the backend (default Slash,LeftSpace,RightSpace,InvalidUtf8,Dot)
--protondrive-mailbox-password string The mailbox password of your two-password proton account (obscured) --protondrive-mailbox-password string The mailbox password of your two-password proton account (obscured)
--protondrive-original-file-size Return the file size before encryption (default true) --protondrive-original-file-size Return the file size before encryption (default true)
--protondrive-otp-secret-key string The OTP secret key (obscured)
--protondrive-password string The password of your proton account (obscured) --protondrive-password string The password of your proton account (obscured)
--protondrive-replace-existing-draft Create a new revision when filename conflict is detected --protondrive-replace-existing-draft Create a new revision when filename conflict is detected
--protondrive-username string The username of your proton account --protondrive-username string The username of your proton account
@@ -837,7 +831,6 @@ rclone [flags]
--s3-use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header (default unset) --s3-use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header (default unset)
--s3-use-already-exists Tristate Set if rclone should report BucketAlreadyExists errors on bucket creation (default unset) --s3-use-already-exists Tristate Set if rclone should report BucketAlreadyExists errors on bucket creation (default unset)
--s3-use-arn-region If true, enables arn region support for the service --s3-use-arn-region If true, enables arn region support for the service
--s3-use-data-integrity-protections Tristate If true use AWS S3 data integrity protections (default unset)
--s3-use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support) --s3-use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support)
--s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
--s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset) --s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset)
@@ -922,7 +915,6 @@ rclone [flags]
--sia-user-agent string Siad User Agent (default "Sia-Agent") --sia-user-agent string Siad User Agent (default "Sia-Agent")
--size-only Skip based on size only, not modtime or checksum --size-only Skip based on size only, not modtime or checksum
--skip-links Don't warn about skipped symlinks --skip-links Don't warn about skipped symlinks
--skip-specials Don't warn about skipped pipes, sockets and device objects
--smb-case-insensitive Whether the server is configured to be case-insensitive (default true) --smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
--smb-description string Description of the remote --smb-description string Description of the remote
--smb-domain string Domain name for NTLM authentication (default "WORKGROUP") --smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
@@ -1023,7 +1015,7 @@ rclone [flags]
--use-json-log Use json log format --use-json-log Use json log format
--use-mmap Use mmap allocator (see docs) --use-mmap Use mmap allocator (see docs)
--use-server-modtime Use server modified time instead of object metadata --use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string (default "rclone/v1.72.0") --user-agent string Set the user-agent to a specified string (default "rclone/v1.71.0")
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
-V, --version Print the version number -V, --version Print the version number
--webdav-auth-redirect Preserve authentication on redirect --webdav-auth-redirect Preserve authentication on redirect
@@ -1065,11 +1057,7 @@ rclone [flags]
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone about](/commands/rclone_about/) - Get quota information from the remote. * [rclone about](/commands/rclone_about/) - Get quota information from the remote.
* [rclone archive](/commands/rclone_archive/) - Perform an action on an archive.
* [rclone authorize](/commands/rclone_authorize/) - Remote authorization. * [rclone authorize](/commands/rclone_authorize/) - Remote authorization.
* [rclone backend](/commands/rclone_backend/) - Run a backend-specific command. * [rclone backend](/commands/rclone_backend/) - Run a backend-specific command.
* [rclone bisync](/commands/rclone_bisync/) - Perform bidirectional synchronization between two paths. * [rclone bisync](/commands/rclone_bisync/) - Perform bidirectional synchronization between two paths.
@@ -1123,5 +1111,3 @@ rclone [flags]
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion. * [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
* [rclone version](/commands/rclone_version/) - Show the version number. * [rclone version](/commands/rclone_version/) - Show the version number.
<!-- markdownlint-restore -->

View File

@@ -15,46 +15,40 @@ output. The output is typically used, free, quota and trash contents.
E.g. Typical output from `rclone about remote:` is: E.g. Typical output from `rclone about remote:` is:
```text Total: 17 GiB
Total: 17 GiB Used: 7.444 GiB
Used: 7.444 GiB Free: 1.315 GiB
Free: 1.315 GiB Trashed: 100.000 MiB
Trashed: 100.000 MiB Other: 8.241 GiB
Other: 8.241 GiB
```
Where the fields are: Where the fields are:
- Total: Total size available. * Total: Total size available.
- Used: Total size used. * Used: Total size used.
- Free: Total space available to this user. * Free: Total space available to this user.
- Trashed: Total space used by trash. * Trashed: Total space used by trash.
- Other: Total amount in other storage (e.g. Gmail, Google Photos). * Other: Total amount in other storage (e.g. Gmail, Google Photos).
- Objects: Total number of objects in the storage. * Objects: Total number of objects in the storage.
All sizes are in number of bytes. All sizes are in number of bytes.
Applying a `--full` flag to the command prints the bytes in full, e.g. Applying a `--full` flag to the command prints the bytes in full, e.g.
```text Total: 18253611008
Total: 18253611008 Used: 7993453766
Used: 7993453766 Free: 1411001220
Free: 1411001220 Trashed: 104857602
Trashed: 104857602 Other: 8849156022
Other: 8849156022
```
A `--json` flag generates conveniently machine-readable output, e.g. A `--json` flag generates conveniently machine-readable output, e.g.
```json {
{ "total": 18253611008,
"total": 18253611008, "used": 7993453766,
"used": 7993453766, "trashed": 104857602,
"trashed": 104857602, "other": 8849156022,
"other": 8849156022, "free": 1411001220
"free": 1411001220 }
}
```
Not all backends print all fields. Information is not included if it is not Not all backends print all fields. Information is not included if it is not
provided by a backend. Where the value is unlimited it is omitted. provided by a backend. Where the value is unlimited it is omitted.
@@ -62,6 +56,7 @@ provided by a backend. Where the value is unlimited it is omitted.
Some backends does not support the `rclone about` command at all, Some backends does not support the `rclone about` command at all,
see complete list in [documentation](https://rclone.org/overview/#optional-features). see complete list in [documentation](https://rclone.org/overview/#optional-features).
``` ```
rclone about remote: [flags] rclone about remote: [flags]
``` ```
@@ -78,10 +73,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -1,47 +0,0 @@
---
title: "rclone archive"
description: "Perform an action on an archive."
versionIntroduced: v1.72
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/archive/ and as part of making a release run "make commanddocs"
---
# rclone archive
Perform an action on an archive.
## Synopsis
Perform an action on an archive. Requires the use of a
subcommand to specify the protocol, e.g.
rclone archive list remote:file.zip
Each subcommand has its own options which you can see in their help.
See [rclone archive create](/commands/rclone_archive_create/) for the
archive formats supported.
```
rclone archive <action> [opts] <source> [<destination>] [flags]
```
## Options
```
-h, --help help for archive
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone archive create](/commands/rclone_archive_create/) - Archive source file(s) to destination.
* [rclone archive extract](/commands/rclone_archive_extract/) - Extract archives from source to destination.
* [rclone archive list](/commands/rclone_archive_list/) - List archive contents from source.
<!-- markdownlint-restore -->

View File

@@ -1,95 +0,0 @@
---
title: "rclone archive create"
description: "Archive source file(s) to destination."
versionIntroduced: v1.72
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/archive/create/ and as part of making a release run "make commanddocs"
---
# rclone archive create
Archive source file(s) to destination.
## Synopsis
Creates an archive from the files in source:path and saves the archive to
dest:path. If dest:path is missing, it will write to the console.
The valid formats for the `--format` flag are listed below. If
`--format` is not set rclone will guess it from the extension of dest:path.
| Format | Extensions |
|:-------|:-----------|
| zip | .zip |
| tar | .tar |
| tar.gz | .tar.gz, .tgz, .taz |
| tar.bz2| .tar.bz2, .tb2, .tbz, .tbz2, .tz2 |
| tar.lz | .tar.lz |
| tar.lz4| .tar.lz4 |
| tar.xz | .tar.xz, .txz |
| tar.zst| .tar.zst, .tzst |
| tar.br | .tar.br |
| tar.sz | .tar.sz |
| tar.mz | .tar.mz |
The `--prefix` and `--full-path` flags control the prefix for the files
in the archive.
If the flag `--full-path` is set then the files will have the full source
path as the prefix.
If the flag `--prefix=<value>` is set then the files will have
`<value>` as prefix. It's possible to create invalid file names with
`--prefix=<value>` so use with caution. Flag `--prefix` has
priority over `--full-path`.
Given a directory `/sourcedir` with the following:
file1.txt
dir1/file2.txt
Running the command `rclone archive create /sourcedir /dest.tar.gz`
will make an archive with the contents:
file1.txt
dir1/
dir1/file2.txt
Running the command `rclone archive create --full-path /sourcedir /dest.tar.gz`
will make an archive with the contents:
sourcedir/file1.txt
sourcedir/dir1/
sourcedir/dir1/file2.txt
Running the command `rclone archive create --prefix=my_new_path /sourcedir /dest.tar.gz`
will make an archive with the contents:
my_new_path/file1.txt
my_new_path/dir1/
my_new_path/dir1/file2.txt
```
rclone archive create [flags] <source> [<destination>]
```
## Options
```
--format string Create the archive with format or guess from extension.
--full-path Set prefix for files in archive to source path
-h, --help help for create
--prefix string Set prefix for files in archive to entered value or source path
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone archive](/commands/rclone_archive/) - Perform an action on an archive.
<!-- markdownlint-restore -->

View File

@@ -1,81 +0,0 @@
---
title: "rclone archive extract"
description: "Extract archives from source to destination."
versionIntroduced: v1.72
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/archive/extract/ and as part of making a release run "make commanddocs"
---
# rclone archive extract
Extract archives from source to destination.
## Synopsis
Extract the archive contents to a destination directory auto detecting
the format. See [rclone archive create](/commands/rclone_archive_create/)
for the archive formats supported.
For example on this archive:
```
$ rclone archive list --long remote:archive.zip
6 2025-10-30 09:46:23.000000000 file.txt
0 2025-10-30 09:46:57.000000000 dir/
4 2025-10-30 09:46:57.000000000 dir/bye.txt
```
You can run extract like this
```
$ rclone archive extract remote:archive.zip remote:extracted
```
Which gives this result
```
$ rclone tree remote:extracted
/
├── dir
│ └── bye.txt
└── file.txt
```
The source or destination or both can be local or remote.
Filters can be used to only extract certain files:
```
$ rclone archive extract archive.zip partial --include "bye.*"
$ rclone tree partial
/
└── dir
└── bye.txt
```
The [archive backend](/archive/) can also be used to extract files. It
can be used to read only mount archives also but it supports a
different set of archive formats to the archive commands.
```
rclone archive extract [flags] <source> <destination>
```
## Options
```
-h, --help help for extract
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone archive](/commands/rclone_archive/) - Perform an action on an archive.
<!-- markdownlint-restore -->

View File

@@ -1,96 +0,0 @@
---
title: "rclone archive list"
description: "List archive contents from source."
versionIntroduced: v1.72
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/archive/list/ and as part of making a release run "make commanddocs"
---
# rclone archive list
List archive contents from source.
## Synopsis
List the contents of an archive to the console, auto detecting the
format. See [rclone archive create](/commands/rclone_archive_create/)
for the archive formats supported.
For example:
```
$ rclone archive list remote:archive.zip
6 file.txt
0 dir/
4 dir/bye.txt
```
Or with `--long` flag for more info:
```
$ rclone archive list --long remote:archive.zip
6 2025-10-30 09:46:23.000000000 file.txt
0 2025-10-30 09:46:57.000000000 dir/
4 2025-10-30 09:46:57.000000000 dir/bye.txt
```
Or with `--plain` flag which is useful for scripting:
```
$ rclone archive list --plain /path/to/archive.zip
file.txt
dir/
dir/bye.txt
```
Or with `--dirs-only`:
```
$ rclone archive list --plain --dirs-only /path/to/archive.zip
dir/
```
Or with `--files-only`:
```
$ rclone archive list --plain --files-only /path/to/archive.zip
file.txt
dir/bye.txt
```
Filters may also be used:
```
$ rclone archive list --long archive.zip --include "bye.*"
4 2025-10-30 09:46:57.000000000 dir/bye.txt
```
The [archive backend](/archive/) can also be used to list files. It
can be used to read only mount archives also but it supports a
different set of archive formats to the archive commands.
```
rclone archive list [flags] <source>
```
## Options
```
--dirs-only Only list directories
--files-only Only list files
-h, --help help for list
--long List extra attributtes
--plain Only list file names
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone archive](/commands/rclone_archive/) - Perform an action on an archive.
<!-- markdownlint-restore -->

View File

@@ -11,23 +11,21 @@ Remote authorization.
## Synopsis ## Synopsis
Remote authorization. Used to authorize a remote or headless Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser. Use as instructed by rclone config. rclone from a machine with a browser - use as instructed by
See also the [remote setup documentation](/remote_setup). rclone config.
The command requires 1-3 arguments: The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Name of a backend (e.g. "drive", "s3") - Either a base64 encoded JSON blob obtained from a previous rclone config session
- Either a base64 encoded JSON blob obtained from a previous rclone config session - Or a client_id and client_secret pair obtained from the remote service
- Or a client_id and client_secret pair obtained from the remote service
Use --auth-no-open-browser to prevent rclone to open auth Use --auth-no-open-browser to prevent rclone to open auth
link in default browser automatically. link in default browser automatically.
Use --template to generate HTML output via a custom Go template. If a blank Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.
string is provided as an argument to this flag, the default template is used.
``` ```
rclone authorize <backendname> [base64_json_blob | client_id client_secret] [flags] rclone authorize <fs name> [base64_json_blob | client_id client_secret] [flags]
``` ```
## Options ## Options
@@ -42,10 +40,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -16,34 +16,27 @@ see the backend docs for definitions.
You can discover what commands a backend implements by using You can discover what commands a backend implements by using
```console rclone backend help remote:
rclone backend help remote: rclone backend help <backendname>
rclone backend help <backendname>
```
You can also discover information about the backend using (see You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs [operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
for more info). for more info).
```console rclone backend features remote:
rclone backend features remote:
```
Pass options to the backend command with -o. This should be key=value or key, e.g.: Pass options to the backend command with -o. This should be key=value or key, e.g.:
```console rclone backend stats remote:path stats -o format=json -o long
rclone backend stats remote:path stats -o format=json -o long
```
Pass arguments to the backend by placing them on the end of the line Pass arguments to the backend by placing them on the end of the line
```console rclone backend cleanup remote:path file1 file2 file3
rclone backend cleanup remote:path file1 file2 file3
```
Note to run these commands on a running backend then see Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs. [backend/command](/rc/#backend-command) in the rc docs.
``` ```
rclone backend <command> remote:path [opts] <args> [flags] rclone backend <command> remote:path [opts] <args> [flags]
``` ```
@@ -63,7 +56,7 @@ See the [global flags page](/flags/) for global options not listed here.
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -71,10 +64,5 @@ Important flags useful for most commands
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -16,19 +16,18 @@ Perform bidirectional synchronization between two paths.
bidirectional cloud sync solution in rclone. bidirectional cloud sync solution in rclone.
It retains the Path1 and Path2 filesystem listings from the prior run. It retains the Path1 and Path2 filesystem listings from the prior run.
On each successive run it will: On each successive run it will:
- list files on Path1 and Path2, and check for changes on each side. - list files on Path1 and Path2, and check for changes on each side.
Changes include `New`, `Newer`, `Older`, and `Deleted` files. Changes include `New`, `Newer`, `Older`, and `Deleted` files.
- Propagate changes on Path1 to Path2, and vice-versa. - Propagate changes on Path1 to Path2, and vice-versa.
Bisync is considered an **advanced command**, so use with care. Bisync is considered an **advanced command**, so use with care.
Make sure you have read and understood the entire [manual](https://rclone.org/bisync) Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) (especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
before using, or data loss can result. Questions can be asked in the or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
[Rclone Forum](https://forum.rclone.org/).
See [full bisync description](https://rclone.org/bisync/) for details. See [full bisync description](https://rclone.org/bisync/) for details.
``` ```
rclone bisync remote1:path1 remote2:path2 [flags] rclone bisync remote1:path1 remote2:path2 [flags]
``` ```
@@ -70,7 +69,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for anything which can copy a file Flags for anything which can copy a file
```text ```
--check-first Do all the checks before starting transfers --check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only) -c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison --compare-dest stringArray Include additional server-side paths during comparison
@@ -111,7 +110,7 @@ Flags for anything which can copy a file
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -121,7 +120,7 @@ Important flags useful for most commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -149,10 +148,5 @@ Flags for filtering directory listings
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -14,21 +14,15 @@ Sends any files to standard output.
You can use it like this to output a single file You can use it like this to output a single file
```sh rclone cat remote:path/to/file
rclone cat remote:path/to/file
```
Or like this to output any file in dir or its subdirectories. Or like this to output any file in dir or its subdirectories.
```sh rclone cat remote:path/to/dir
rclone cat remote:path/to/dir
```
Or like this to output any .txt files in dir or its subdirectories. Or like this to output any .txt files in dir or its subdirectories.
```sh rclone --include "*.txt" cat remote:path/to/dir
rclone --include "*.txt" cat remote:path/to/dir
```
Use the `--head` flag to print characters only at the start, `--tail` for Use the `--head` flag to print characters only at the start, `--tail` for
the end and `--offset` and `--count` to print a section in the middle. the end and `--offset` and `--count` to print a section in the middle.
@@ -39,17 +33,14 @@ Use the `--separator` flag to print a separator value between files. Be sure to
shell-escape special characters. For example, to print a newline between shell-escape special characters. For example, to print a newline between
files, use: files, use:
- bash: * bash:
```sh rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
```
- powershell: * powershell:
rclone --include "*.txt" --separator "`n" cat remote:path/to/dir
```powershell
rclone --include "*.txt" --separator "`n" cat remote:path/to/dir
```
``` ```
rclone cat remote:path [flags] rclone cat remote:path [flags]
@@ -74,7 +65,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -104,17 +95,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -52,6 +52,7 @@ you what happened to it. These are reminiscent of diff files.
The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int) The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information. option for more information.
``` ```
rclone check source:path dest:path [flags] rclone check source:path dest:path [flags]
``` ```
@@ -78,7 +79,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags used for check commands Flags used for check commands
```text ```
--max-backlog int Maximum number of objects in sync or check backlog (default 10000) --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
``` ```
@@ -86,7 +87,7 @@ Flags used for check commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -116,17 +117,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -47,6 +47,7 @@ you what happened to it. These are reminiscent of diff files.
The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int) The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information. option for more information.
``` ```
rclone checksum <hash> sumfile dst:path [flags] rclone checksum <hash> sumfile dst:path [flags]
``` ```
@@ -72,7 +73,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -102,17 +103,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -13,6 +13,7 @@ Clean up the remote if possible.
Clean up the remote if possible. Empty the trash or delete old file Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes. versions. Not supported by all remotes.
``` ```
rclone cleanup remote:path [flags] rclone cleanup remote:path [flags]
``` ```
@@ -30,7 +31,7 @@ See the [global flags page](/flags/) for global options not listed here.
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -38,10 +39,5 @@ Important flags useful for most commands
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -15,6 +15,7 @@ Output completion script for a given shell.
Generates a shell completion script for rclone. Generates a shell completion script for rclone.
Run with `--help` to list the supported shells. Run with `--help` to list the supported shells.
## Options ## Options
``` ```
@@ -25,14 +26,9 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone completion bash](/commands/rclone_completion_bash/) - Output bash completion script for rclone. * [rclone completion bash](/commands/rclone_completion_bash/) - Output bash completion script for rclone.
* [rclone completion fish](/commands/rclone_completion_fish/) - Output fish completion script for rclone. * [rclone completion fish](/commands/rclone_completion_fish/) - Output fish completion script for rclone.
* [rclone completion powershell](/commands/rclone_completion_powershell/) - Output powershell completion script for rclone. * [rclone completion powershell](/commands/rclone_completion_powershell/) - Output powershell completion script for rclone.
* [rclone completion zsh](/commands/rclone_completion_zsh/) - Output zsh completion script for rclone. * [rclone completion zsh](/commands/rclone_completion_zsh/) - Output zsh completion script for rclone.
<!-- markdownlint-restore -->

View File

@@ -13,21 +13,17 @@ Output bash completion script for rclone.
Generates a bash shell autocompletion script for rclone. Generates a bash shell autocompletion script for rclone.
By default, when run without any arguments, By default, when run without any arguments,
```console rclone completion bash
rclone completion bash
```
the generated script will be written to the generated script will be written to
```console /etc/bash_completion.d/rclone
/etc/bash_completion.d/rclone
```
and so rclone will probably need to be run as root, or with sudo. and so rclone will probably need to be run as root, or with sudo.
If you supply a path to a file as the command line argument, then If you supply a path to a file as the command line argument, then
the generated script will be written to that file, in which case the generated script will be written to that file, in which case
you should not need root privileges. you should not need root privileges.
@@ -38,13 +34,12 @@ can logout and login again to use the autocompletion script.
Alternatively, you can source the script directly Alternatively, you can source the script directly
```console . /path/to/my_bash_completion_scripts/rclone
. /path/to/my_bash_completion_scripts/rclone
```
and the autocompletion functionality will be added to your and the autocompletion functionality will be added to your
current shell. current shell.
``` ```
rclone completion bash [output_file] [flags] rclone completion bash [output_file] [flags]
``` ```
@@ -59,10 +54,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell. * [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
<!-- markdownlint-restore -->

View File

@@ -16,22 +16,19 @@ Generates a fish autocompletion script for rclone.
This writes to /etc/fish/completions/rclone.fish by default so will This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, e.g. probably need to be run with sudo or as root, e.g.
```console sudo rclone completion fish
sudo rclone completion fish
```
Logout and login again to use the autocompletion scripts, or source Logout and login again to use the autocompletion scripts, or source
them directly them directly
```console . /etc/fish/completions/rclone.fish
. /etc/fish/completions/rclone.fish
```
If you supply a command line argument the script will be written If you supply a command line argument the script will be written
there. there.
If output_file is "-", then the output will be written to stdout. If output_file is "-", then the output will be written to stdout.
``` ```
rclone completion fish [output_file] [flags] rclone completion fish [output_file] [flags]
``` ```
@@ -46,10 +43,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell. * [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
<!-- markdownlint-restore -->

View File

@@ -15,15 +15,14 @@ Generate the autocompletion script for powershell.
To load completions in your current shell session: To load completions in your current shell session:
```console rclone completion powershell | Out-String | Invoke-Expression
rclone completion powershell | Out-String | Invoke-Expression
```
To load completions for every new session, add the output of the above command To load completions for every new session, add the output of the above command
to your powershell profile. to your powershell profile.
If output_file is "-" or missing, then the output will be written to stdout. If output_file is "-" or missing, then the output will be written to stdout.
``` ```
rclone completion powershell [output_file] [flags] rclone completion powershell [output_file] [flags]
``` ```
@@ -38,10 +37,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell. * [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
<!-- markdownlint-restore -->

View File

@@ -16,22 +16,19 @@ Generates a zsh autocompletion script for rclone.
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
probably need to be run with sudo or as root, e.g. probably need to be run with sudo or as root, e.g.
```console sudo rclone completion zsh
sudo rclone completion zsh
```
Logout and login again to use the autocompletion scripts, or source Logout and login again to use the autocompletion scripts, or source
them directly them directly
```console autoload -U compinit && compinit
autoload -U compinit && compinit
```
If you supply a command line argument the script will be written If you supply a command line argument the script will be written
there. there.
If output_file is "-", then the output will be written to stdout. If output_file is "-", then the output will be written to stdout.
``` ```
rclone completion zsh [output_file] [flags] rclone completion zsh [output_file] [flags]
``` ```
@@ -46,10 +43,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell. * [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
<!-- markdownlint-restore -->

View File

@@ -14,6 +14,7 @@ Enter an interactive configuration session where you can setup new
remotes and manage existing ones. You may also set or remove a remotes and manage existing ones. You may also set or remove a
password to protect your configuration. password to protect your configuration.
``` ```
rclone config [flags] rclone config [flags]
``` ```
@@ -28,9 +29,6 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone config create](/commands/rclone_config_create/) - Create a new remote with name, type and options. * [rclone config create](/commands/rclone_config_create/) - Create a new remote with name, type and options.
* [rclone config delete](/commands/rclone_config_delete/) - Delete an existing remote. * [rclone config delete](/commands/rclone_config_delete/) - Delete an existing remote.
@@ -45,10 +43,7 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone config reconnect](/commands/rclone_config_reconnect/) - Re-authenticates user with remote. * [rclone config reconnect](/commands/rclone_config_reconnect/) - Re-authenticates user with remote.
* [rclone config redacted](/commands/rclone_config_redacted/) - Print redacted (decrypted) config file, or the redacted config for a single remote. * [rclone config redacted](/commands/rclone_config_redacted/) - Print redacted (decrypted) config file, or the redacted config for a single remote.
* [rclone config show](/commands/rclone_config_show/) - Print (decrypted) config file, or the config for a single remote. * [rclone config show](/commands/rclone_config_show/) - Print (decrypted) config file, or the config for a single remote.
* [rclone config string](/commands/rclone_config_string/) - Print connection string for a single remote.
* [rclone config touch](/commands/rclone_config_touch/) - Ensure configuration file exists. * [rclone config touch](/commands/rclone_config_touch/) - Ensure configuration file exists.
* [rclone config update](/commands/rclone_config_update/) - Update options in an existing remote. * [rclone config update](/commands/rclone_config_update/) - Update options in an existing remote.
* [rclone config userinfo](/commands/rclone_config_userinfo/) - Prints info about logged in user of remote. * [rclone config userinfo](/commands/rclone_config_userinfo/) - Prints info about logged in user of remote.
<!-- markdownlint-restore -->

View File

@@ -16,17 +16,13 @@ should be passed in pairs of `key` `value` or as `key=value`.
For example, to make a swift remote of name myremote using auto config For example, to make a swift remote of name myremote using auto config
you would do: you would do:
```sh rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth true rclone config create myremote swift env_auth=true
rclone config create myremote swift env_auth=true
```
So for example if you wanted to configure a Google Drive remote but So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this: using remote authorization you would do this:
```sh rclone config create mydrive drive config_is_local=false
rclone config create mydrive drive config_is_local=false
```
Note that if the config process would normally ask a question the Note that if the config process would normally ask a question the
default is taken (unless `--non-interactive` is used). Each time default is taken (unless `--non-interactive` is used). Each time
@@ -54,29 +50,29 @@ it.
This will look something like (some irrelevant detail removed): This will look something like (some irrelevant detail removed):
```json ```
{ {
"State": "*oauth-islocal,teamdrive,,", "State": "*oauth-islocal,teamdrive,,",
"Option": { "Option": {
"Name": "config_is_local", "Name": "config_is_local",
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n", "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
"Default": true, "Default": true,
"Examples": [ "Examples": [
{ {
"Value": "true", "Value": "true",
"Help": "Yes" "Help": "Yes"
}, },
{ {
"Value": "false", "Value": "false",
"Help": "No" "Help": "No"
} }
], ],
"Required": false, "Required": false,
"IsPassword": false, "IsPassword": false,
"Type": "bool", "Type": "bool",
"Exclusive": true, "Exclusive": true,
}, },
"Error": "", "Error": "",
} }
``` ```
@@ -99,9 +95,7 @@ The keys of `Option` are used as follows:
If `Error` is set then it should be shown to the user at the same If `Error` is set then it should be shown to the user at the same
time as the question. time as the question.
```sh rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
```
Note that when using `--continue` all passwords should be passed in Note that when using `--continue` all passwords should be passed in
the clear (not obscured). Any default config values should be passed the clear (not obscured). Any default config values should be passed
@@ -117,6 +111,7 @@ defaults for questions as usual.
Note that `bin/config.py` in the rclone source implements this protocol Note that `bin/config.py` in the rclone source implements this protocol
as a readable demonstration. as a readable demonstration.
``` ```
rclone config create name type [key value]* [flags] rclone config create name type [key value]* [flags]
``` ```
@@ -139,10 +134,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -22,10 +22,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -15,6 +15,7 @@ This normally means revoking the oauth token.
To reconnect use "rclone config reconnect". To reconnect use "rclone config reconnect".
``` ```
rclone config disconnect remote: [flags] rclone config disconnect remote: [flags]
``` ```
@@ -29,10 +30,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -22,10 +22,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -14,6 +14,7 @@ Enter an interactive configuration session where you can setup new
remotes and manage existing ones. You may also set or remove a remotes and manage existing ones. You may also set or remove a
password to protect your configuration. password to protect your configuration.
``` ```
rclone config edit [flags] rclone config edit [flags]
``` ```
@@ -28,10 +29,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -12,6 +12,7 @@ set, remove and check the encryption for the config file
This command sets, clears and checks the encryption for the config file using This command sets, clears and checks the encryption for the config file using
the subcommands below. the subcommands below.
## Options ## Options
``` ```
@@ -22,13 +23,8 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
* [rclone config encryption check](/commands/rclone_config_encryption_check/) - Check that the config file is encrypted * [rclone config encryption check](/commands/rclone_config_encryption_check/) - Check that the config file is encrypted
* [rclone config encryption remove](/commands/rclone_config_encryption_remove/) - Remove the config file encryption password * [rclone config encryption remove](/commands/rclone_config_encryption_remove/) - Remove the config file encryption password
* [rclone config encryption set](/commands/rclone_config_encryption_set/) - Set or change the config file encryption password * [rclone config encryption set](/commands/rclone_config_encryption_set/) - Set or change the config file encryption password
<!-- markdownlint-restore -->

View File

@@ -18,6 +18,7 @@ If decryption fails it will return a non-zero exit code if using
If the config file is not encrypted it will return a non zero exit code. If the config file is not encrypted it will return a non zero exit code.
``` ```
rclone config encryption check [flags] rclone config encryption check [flags]
``` ```
@@ -32,10 +33,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file * [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file
<!-- markdownlint-restore -->

View File

@@ -19,6 +19,7 @@ password.
If the config was not encrypted then no error will be returned and If the config was not encrypted then no error will be returned and
this command will do nothing. this command will do nothing.
``` ```
rclone config encryption remove [flags] rclone config encryption remove [flags]
``` ```
@@ -33,10 +34,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file * [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file
<!-- markdownlint-restore -->

View File

@@ -29,6 +29,7 @@ encryption remove`), then set it again with this command which may be
easier if you don't mind the unencrypted config file being on the disk easier if you don't mind the unencrypted config file being on the disk
briefly. briefly.
``` ```
rclone config encryption set [flags] rclone config encryption set [flags]
``` ```
@@ -43,10 +44,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file * [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file
<!-- markdownlint-restore -->

View File

@@ -22,10 +22,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -16,14 +16,13 @@ The `password` should be passed in in clear (unobscured).
For example, to set password of a remote of name myremote you would do: For example, to set password of a remote of name myremote you would do:
```sh rclone config password myremote fieldname mypassword
rclone config password myremote fieldname mypassword rclone config password myremote fieldname=mypassword
rclone config password myremote fieldname=mypassword
```
This command is obsolete now that "config update" and "config create" This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly. both support obscuring passwords directly.
``` ```
rclone config password name [key value]+ [flags] rclone config password name [key value]+ [flags]
``` ```
@@ -38,10 +37,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -22,10 +22,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -22,10 +22,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -15,6 +15,7 @@ To disconnect the remote use "rclone config disconnect".
This normally means going through the interactive oauth flow again. This normally means going through the interactive oauth flow again.
``` ```
rclone config reconnect remote: [flags] rclone config reconnect remote: [flags]
``` ```
@@ -29,10 +30,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -20,6 +20,8 @@ This makes the config file suitable for posting online for support.
It should be double checked before posting as the redaction may not be perfect. It should be double checked before posting as the redaction may not be perfect.
``` ```
rclone config redacted [<remote>] [flags] rclone config redacted [<remote>] [flags]
``` ```
@@ -34,10 +36,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -22,10 +22,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -1,55 +0,0 @@
---
title: "rclone config string"
description: "Print connection string for a single remote."
versionIntroduced: v1.72
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/string/ and as part of making a release run "make commanddocs"
---
# rclone config string
Print connection string for a single remote.
## Synopsis
Print a connection string for a single remote.
The [connection strings](/docs/#connection-strings) can be used
wherever a remote is needed and can be more convenient than using the
config file, especially if using the RC API.
Backend parameters may be provided to the command also.
Example:
```sh
$ rclone config string s3:rclone --s3-no-check-bucket
:s3,access_key_id=XXX,no_check_bucket,provider=AWS,region=eu-west-2,secret_access_key=YYY:rclone
```
**NB** the strings are not quoted for use in shells (eg bash,
powershell, windows cmd). Most will work if enclosed in "double
quotes", however connection strings that contain double quotes will
require further quoting which is very shell dependent.
```
rclone config string <remote> [flags]
```
## Options
```
-h, --help help for string
```
See the [global flags page](/flags/) for global options not listed here.
## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -22,10 +22,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -16,17 +16,13 @@ pairs of `key` `value` or as `key=value`.
For example, to update the env_auth field of a remote of name myremote For example, to update the env_auth field of a remote of name myremote
you would do: you would do:
```sh rclone config update myremote env_auth true
rclone config update myremote env_auth true rclone config update myremote env_auth=true
rclone config update myremote env_auth=true
```
If the remote uses OAuth the token will be updated, if you don't If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus: require this add an extra parameter thus:
```sh rclone config update myremote env_auth=true config_refresh_token=false
rclone config update myremote env_auth=true config_refresh_token=false
```
Note that if the config process would normally ask a question the Note that if the config process would normally ask a question the
default is taken (unless `--non-interactive` is used). Each time default is taken (unless `--non-interactive` is used). Each time
@@ -54,29 +50,29 @@ it.
This will look something like (some irrelevant detail removed): This will look something like (some irrelevant detail removed):
```json ```
{ {
"State": "*oauth-islocal,teamdrive,,", "State": "*oauth-islocal,teamdrive,,",
"Option": { "Option": {
"Name": "config_is_local", "Name": "config_is_local",
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n", "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
"Default": true, "Default": true,
"Examples": [ "Examples": [
{ {
"Value": "true", "Value": "true",
"Help": "Yes" "Help": "Yes"
}, },
{ {
"Value": "false", "Value": "false",
"Help": "No" "Help": "No"
} }
], ],
"Required": false, "Required": false,
"IsPassword": false, "IsPassword": false,
"Type": "bool", "Type": "bool",
"Exclusive": true, "Exclusive": true,
}, },
"Error": "", "Error": "",
} }
``` ```
@@ -99,9 +95,7 @@ The keys of `Option` are used as follows:
If `Error` is set then it should be shown to the user at the same If `Error` is set then it should be shown to the user at the same
time as the question. time as the question.
```sh rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
```
Note that when using `--continue` all passwords should be passed in Note that when using `--continue` all passwords should be passed in
the clear (not obscured). Any default config values should be passed the clear (not obscured). Any default config values should be passed
@@ -117,6 +111,7 @@ defaults for questions as usual.
Note that `bin/config.py` in the rclone source implements this protocol Note that `bin/config.py` in the rclone source implements this protocol
as a readable demonstration. as a readable demonstration.
``` ```
rclone config update name [key value]+ [flags] rclone config update name [key value]+ [flags]
``` ```
@@ -139,10 +134,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -12,6 +12,7 @@ Prints info about logged in user of remote.
This prints the details of the person logged in to the cloud storage This prints the details of the person logged in to the cloud storage
system. system.
``` ```
rclone config userinfo remote: [flags] rclone config userinfo remote: [flags]
``` ```
@@ -27,10 +28,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
<!-- markdownlint-restore -->

View File

@@ -10,8 +10,8 @@ Convert file and directory names in place.
## Synopsis ## Synopsis
convmv supports advanced path name transformations for converting and renaming
files and directories by applying prefixes, suffixes, and other alterations. convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
| Command | Description | | Command | Description |
|------|------| |------|------|
@@ -20,13 +20,10 @@ files and directories by applying prefixes, suffixes, and other alterations.
| `--name-transform suffix_keep_extension=XXXX` | Appends XXXX to the file name while preserving the original file extension. | | `--name-transform suffix_keep_extension=XXXX` | Appends XXXX to the file name while preserving the original file extension. |
| `--name-transform trimprefix=XXXX` | Removes XXXX if it appears at the start of the file name. | | `--name-transform trimprefix=XXXX` | Removes XXXX if it appears at the start of the file name. |
| `--name-transform trimsuffix=XXXX` | Removes XXXX if it appears at the end of the file name. | | `--name-transform trimsuffix=XXXX` | Removes XXXX if it appears at the end of the file name. |
| `--name-transform regex=pattern/replacement` | Applies a regex-based transformation. | | `--name-transform regex=/pattern/replacement/` | Applies a regex-based transformation. |
| `--name-transform replace=old:new` | Replaces occurrences of old with new in the file name. | | `--name-transform replace=old:new` | Replaces occurrences of old with new in the file name. |
| `--name-transform date={YYYYMMDD}` | Appends or prefixes the specified date format. | | `--name-transform date={YYYYMMDD}` | Appends or prefixes the specified date format. |
| `--name-transform truncate=N` | Truncates the file name to a maximum of N characters. | | `--name-transform truncate=N` | Truncates the file name to a maximum of N characters. |
| `--name-transform truncate_keep_extension=N` | Truncates the file name to a maximum of N characters while preserving the original file extension. |
| `--name-transform truncate_bytes=N` | Truncates the file name to a maximum of N bytes (not characters). |
| `--name-transform truncate_bytes_keep_extension=N` | Truncates the file name to a maximum of N bytes (not characters) while preserving the original file extension. |
| `--name-transform base64encode` | Encodes the file name in Base64. | | `--name-transform base64encode` | Encodes the file name in Base64. |
| `--name-transform base64decode` | Decodes a Base64-encoded file name. | | `--name-transform base64decode` | Decodes a Base64-encoded file name. |
| `--name-transform encoder=ENCODING` | Converts the file name to the specified encoding (e.g., ISO-8859-1, Windows-1252, Macintosh). | | `--name-transform encoder=ENCODING` | Converts the file name to the specified encoding (e.g., ISO-8859-1, Windows-1252, Macintosh). |
@@ -41,227 +38,211 @@ files and directories by applying prefixes, suffixes, and other alterations.
| `--name-transform nfd` | Converts the file name to NFD Unicode normalization form. | | `--name-transform nfd` | Converts the file name to NFD Unicode normalization form. |
| `--name-transform nfkc` | Converts the file name to NFKC Unicode normalization form. | | `--name-transform nfkc` | Converts the file name to NFKC Unicode normalization form. |
| `--name-transform nfkd` | Converts the file name to NFKD Unicode normalization form. | | `--name-transform nfkd` | Converts the file name to NFKD Unicode normalization form. |
| `--name-transform command=/path/to/my/programfile names.` | Executes an external program to transform. | | `--name-transform command=/path/to/my/programfile names.` | Executes an external program to transform |
Conversion modes:
```text Conversion modes:
none
nfc
nfd
nfkc
nfkd
replace
prefix
suffix
suffix_keep_extension
trimprefix
trimsuffix
index
date
truncate
truncate_keep_extension
truncate_bytes
truncate_bytes_keep_extension
base64encode
base64decode
encoder
decoder
ISO-8859-1
Windows-1252
Macintosh
charmap
lowercase
uppercase
titlecase
ascii
url
regex
command
``` ```
none
Char maps: nfc
nfd
```text nfkc
IBM-Code-Page-037 nfkd
IBM-Code-Page-437 replace
IBM-Code-Page-850 prefix
IBM-Code-Page-852 suffix
IBM-Code-Page-855 suffix_keep_extension
Windows-Code-Page-858 trimprefix
IBM-Code-Page-860 trimsuffix
IBM-Code-Page-862 index
IBM-Code-Page-863 date
IBM-Code-Page-865 truncate
IBM-Code-Page-866 base64encode
IBM-Code-Page-1047 base64decode
IBM-Code-Page-1140 encoder
ISO-8859-1 decoder
ISO-8859-2 ISO-8859-1
ISO-8859-3 Windows-1252
ISO-8859-4 Macintosh
ISO-8859-5 charmap
ISO-8859-6 lowercase
ISO-8859-7 uppercase
ISO-8859-8 titlecase
ISO-8859-9 ascii
ISO-8859-10 url
ISO-8859-13 regex
ISO-8859-14 command
ISO-8859-15
ISO-8859-16
KOI8-R
KOI8-U
Macintosh
Macintosh-Cyrillic
Windows-874
Windows-1250
Windows-1251
Windows-1252
Windows-1253
Windows-1254
Windows-1255
Windows-1256
Windows-1257
Windows-1258
X-User-Defined
``` ```
Char maps:
Encoding masks:
```text
Asterisk
BackQuote
BackSlash
Colon
CrLf
Ctl
Del
Dollar
Dot
DoubleQuote
Exclamation
Hash
InvalidUtf8
LeftCrLfHtVt
LeftPeriod
LeftSpace
LeftTilde
LtGt
None
Percent
Pipe
Question
Raw
RightCrLfHtVt
RightPeriod
RightSpace
Semicolon
SingleQuote
Slash
SquareBracket
``` ```
IBM-Code-Page-037
IBM-Code-Page-437
IBM-Code-Page-850
IBM-Code-Page-852
IBM-Code-Page-855
Windows-Code-Page-858
IBM-Code-Page-860
IBM-Code-Page-862
IBM-Code-Page-863
IBM-Code-Page-865
IBM-Code-Page-866
IBM-Code-Page-1047
IBM-Code-Page-1140
ISO-8859-1
ISO-8859-2
ISO-8859-3
ISO-8859-4
ISO-8859-5
ISO-8859-6
ISO-8859-7
ISO-8859-8
ISO-8859-9
ISO-8859-10
ISO-8859-13
ISO-8859-14
ISO-8859-15
ISO-8859-16
KOI8-R
KOI8-U
Macintosh
Macintosh-Cyrillic
Windows-874
Windows-1250
Windows-1251
Windows-1252
Windows-1253
Windows-1254
Windows-1255
Windows-1256
Windows-1257
Windows-1258
X-User-Defined
```
Encoding masks:
```
Asterisk
BackQuote
BackSlash
Colon
CrLf
Ctl
Del
Dollar
Dot
DoubleQuote
Exclamation
Hash
InvalidUtf8
LeftCrLfHtVt
LeftPeriod
LeftSpace
LeftTilde
LtGt
None
Percent
Pipe
Question
Raw
RightCrLfHtVt
RightPeriod
RightSpace
Semicolon
SingleQuote
Slash
SquareBracket
```
Examples:
Examples: ```
```console
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,uppercase" rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,uppercase"
// Output: STORIES/THE QUICK BROWN FOX!.TXT // Output: STORIES/THE QUICK BROWN FOX!.TXT
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,replace=Fox:Turtle" --name-transform "all,replace=Quick:Slow" rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,replace=Fox:Turtle" --name-transform "all,replace=Quick:Slow"
// Output: stories/The Slow Brown Turtle!.txt // Output: stories/The Slow Brown Turtle!.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,base64encode" rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,base64encode"
// Output: c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0 // Output: c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0
``` ```
```console ```
rclone convmv "c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0" --name-transform "all,base64decode" rclone convmv "c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0" --name-transform "all,base64decode"
// Output: stories/The Quick Brown Fox!.txt // Output: stories/The Quick Brown Fox!.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,nfc" rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,nfc"
// Output: stories/The Quick Brown 🦊 Fox Went to the Café!.txt // Output: stories/The Quick Brown 🦊 Fox Went to the Café!.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,nfd" rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,nfd"
// Output: stories/The Quick Brown 🦊 Fox Went to the Café!.txt // Output: stories/The Quick Brown 🦊 Fox Went to the Café!.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown 🦊 Fox!.txt" --name-transform "all,ascii" rclone convmv "stories/The Quick Brown 🦊 Fox!.txt" --name-transform "all,ascii"
// Output: stories/The Quick Brown Fox!.txt // Output: stories/The Quick Brown Fox!.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,trimsuffix=.txt" rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,trimsuffix=.txt"
// Output: stories/The Quick Brown Fox! // Output: stories/The Quick Brown Fox!
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,prefix=OLD_" rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,prefix=OLD_"
// Output: OLD_stories/OLD_The Quick Brown Fox!.txt // Output: OLD_stories/OLD_The Quick Brown Fox!.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,charmap=ISO-8859-7" rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,charmap=ISO-8859-7"
// Output: stories/The Quick Brown _ Fox Went to the Caf_!.txt // Output: stories/The Quick Brown _ Fox Went to the Caf_!.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox: A Memoir [draft].txt" --name-transform "all,encoder=Colon,SquareBracket" rclone convmv "stories/The Quick Brown Fox: A Memoir [draft].txt" --name-transform "all,encoder=Colon,SquareBracket"
// Output: stories/The Quick Brown Fox A Memoir draft.txt // Output: stories/The Quick Brown Fox A Memoir draft.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,truncate=21" rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,truncate=21"
// Output: stories/The Quick Brown 🦊 Fox // Output: stories/The Quick Brown 🦊 Fox
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,command=echo" rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,command=echo"
// Output: stories/The Quick Brown Fox!.txt // Output: stories/The Quick Brown Fox!.txt
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox!" --name-transform "date=-{YYYYMMDD}" rclone convmv "stories/The Quick Brown Fox!" --name-transform "date=-{YYYYMMDD}"
// Output: stories/The Quick Brown Fox!-20251121 // Output: stories/The Quick Brown Fox!-20250618
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox!" --name-transform "date=-{macfriendlytime}" rclone convmv "stories/The Quick Brown Fox!" --name-transform "date=-{macfriendlytime}"
// Output: stories/The Quick Brown Fox!-2025-11-21 0505PM // Output: stories/The Quick Brown Fox!-2025-06-18 0148PM
``` ```
```console ```
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,regex=[\\.\\w]/ab" rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,regex=[\\.\\w]/ab"
// Output: ababababababab/ababab ababababab ababababab ababab!abababab // Output: ababababababab/ababab ababababab ababababab ababab!abababab
``` ```
The regex command generally accepts Perl-style regular expressions, the exact Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
syntax is defined in the [Go regular expression reference](https://golang.org/pkg/regexp/syntax/).
The replacement string may contain capturing group variables, referencing
capturing groups using the syntax `$name` or `${name}`, where the name can
refer to a named capturing group or it can simply be the index as a number.
To insert a literal $, use $$.
Multiple transformations can be used in sequence, applied
in the order they are specified on the command line.
The `--name-transform` flag is also available in `sync`, `copy`, and `move`. The `--name-transform` flag is also available in `sync`, `copy`, and `move`.
## Files vs Directories # Files vs Directories
By default `--name-transform` will only apply to file names. The means only the By default `--name-transform` will only apply to file names. The means only the leaf file name will be transformed.
leaf file name will be transformed. However some of the transforms would be However some of the transforms would be better applied to the whole path or just directories.
better applied to the whole path or just directories. To choose which which To choose which which part of the file path is affected some tags can be added to the `--name-transform`.
part of the file path is affected some tags can be added to the `--name-transform`.
| Tag | Effect | | Tag | Effect |
|------|------| |------|------|
@@ -269,58 +250,42 @@ part of the file path is affected some tags can be added to the `--name-transfor
| `dir` | Only transform name of directories - these may appear anywhere in the path | | `dir` | Only transform name of directories - these may appear anywhere in the path |
| `all` | Transform the entire path for files and directories | | `all` | Transform the entire path for files and directories |
This is used by adding the tag into the transform name like this: This is used by adding the tag into the transform name like this: `--name-transform file,prefix=ABC` or `--name-transform dir,prefix=DEF`.
`--name-transform file,prefix=ABC` or `--name-transform dir,prefix=DEF`.
For some conversions using all is more likely to be useful, for example For some conversions using all is more likely to be useful, for example `--name-transform all,nfc`.
`--name-transform all,nfc`.
Note that `--name-transform` may not add path separators `/` to the name. Note that `--name-transform` may not add path separators `/` to the name. This will cause an error.
This will cause an error.
## Ordering and Conflicts # Ordering and Conflicts
- Transformations will be applied in the order specified by the user. * Transformations will be applied in the order specified by the user.
- If the `file` tag is in use (the default) then only the leaf name of files * If the `file` tag is in use (the default) then only the leaf name of files will be transformed.
will be transformed. * If the `dir` tag is in use then directories anywhere in the path will be transformed
- If the `dir` tag is in use then directories anywhere in the path will be * If the `all` tag is in use then directories and files anywhere in the path will be transformed
transformed * Each transformation will be run one path segment at a time.
- If the `all` tag is in use then directories and files anywhere in the path * If a transformation adds a `/` or ends up with an empty path segment then that will be an error.
will be transformed * It is up to the user to put the transformations in a sensible order.
- Each transformation will be run one path segment at a time. * Conflicting transformations, such as `prefix` followed by `trimprefix` or `nfc` followed by `nfd`, are possible.
- If a transformation adds a `/` or ends up with an empty path segment then * Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
that will be an error. user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
- It is up to the user to put the transformations in a sensible order. * Users should be aware that certain combinations may lead to unexpected results and should verify
- Conflicting transformations, such as `prefix` followed by `trimprefix` or transformations using `--dry-run` before execution.
`nfc` followed by `nfd`, are possible.
- Instead of enforcing mutual exclusivity, transformations are applied in
sequence as specified by the user, allowing for intentional use cases
(e.g., trimming one prefix before adding another).
- Users should be aware that certain combinations may lead to unexpected
results and should verify transformations using `--dry-run` before execution.
## Race Conditions and Non-Deterministic Behavior # Race Conditions and Non-Deterministic Behavior
Some transformations, such as `replace=old:new`, may introduce conflicts where Some transformations, such as `replace=old:new`, may introduce conflicts where multiple source files map to the same destination name.
multiple source files map to the same destination name. This can lead to race This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
conditions when performing concurrent transfers. It is up to the user to * If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
anticipate these. * Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
- If two files from the source are transformed into the same name at the
destination, the final state may be non-deterministic.
- Running rclone check after a sync using such transformations may erroneously
report missing or differing files due to overwritten results.
To minimize risks, users should: To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use `--dry-run` to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with `--transfers=1` if necessary.
* Certain transformations (e.g. `prefix`) will have a multiplying effect every time they are used. Avoid these when using `bisync`.
- Carefully review transformations that may introduce conflicts.
- Use `--dry-run` to inspect changes before executing a sync (but keep in mind
that it won't show the effect of non-deterministic transformations).
- Avoid transformations that cause multiple distinct source files to map to the
same destination name.
- Consider disabling concurrency with `--transfers=1` if necessary.
- Certain transformations (e.g. `prefix`) will have a multiplying effect every
time they are used. Avoid these when using `bisync`.
``` ```
rclone convmv dest:path --name-transform XXX [flags] rclone convmv dest:path --name-transform XXX [flags]
@@ -341,7 +306,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for anything which can copy a file Flags for anything which can copy a file
```text ```
--check-first Do all the checks before starting transfers --check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only) -c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison --compare-dest stringArray Include additional server-side paths during comparison
@@ -382,7 +347,7 @@ Flags for anything which can copy a file
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -392,7 +357,7 @@ Important flags useful for most commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -422,17 +387,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -28,30 +28,22 @@ go there.
For example For example
```sh rclone copy source:sourcepath dest:destpath
rclone copy source:sourcepath dest:destpath
```
Let's say there are two files in sourcepath Let's say there are two files in sourcepath
```text sourcepath/one.txt
sourcepath/one.txt sourcepath/two.txt
sourcepath/two.txt
```
This copies them to This copies them to
```text destpath/one.txt
destpath/one.txt destpath/two.txt
destpath/two.txt
```
Not to Not to
```text destpath/sourcepath/one.txt
destpath/sourcepath/one.txt destpath/sourcepath/two.txt
destpath/sourcepath/two.txt
```
If you are familiar with `rsync`, rclone always works as if you had If you are familiar with `rsync`, rclone always works as if you had
written a trailing `/` - meaning "copy the contents of this directory". written a trailing `/` - meaning "copy the contents of this directory".
@@ -67,30 +59,27 @@ For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have changed them change every day, you can copy all the files which have changed
recently very efficiently like this: recently very efficiently like this:
```sh rclone copy --max-age 24h --no-traverse /path/to/src remote:
rclone copy --max-age 24h --no-traverse /path/to/src remote:
```
Rclone will sync the modification times of files and directories if Rclone will sync the modification times of files and directories if
the backend supports it. If metadata syncing is required then use the the backend supports it. If metadata syncing is required then use the
`--metadata` flag. `--metadata` flag.
Note that the modification time and metadata for the root directory Note that the modification time and metadata for the root directory
will **not** be synced. See [issue #7652](https://github.com/rclone/rclone/issues/7652) will **not** be synced. See https://github.com/rclone/rclone/issues/7652
for more info. for more info.
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics. **Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics.
**Note**: Use the `--dry-run` or the `--interactive`/`-i` flag to test without **Note**: Use the `--dry-run` or the `--interactive`/`-i` flag to test without copying anything.
copying anything.
## Logger Flags # Logger Flags
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` flags write paths,
flags write paths, one per line, to the file name (or stdout if it is `-`) one per line, to the file name (or stdout if it is `-`) supplied. What they write is described
supplied. What they write is described in the help below. For example in the help below. For example `--differ` will write all paths which are present
`--differ` will write all paths which are present on both the source and on both the source and destination but different.
destination but different.
The `--combined` flag will write a file (or stdout) which contains all The `--combined` flag will write a file (or stdout) which contains all
file paths with a symbol and then a space and then the path to tell file paths with a symbol and then a space and then the path to tell
@@ -123,7 +112,9 @@ are not currently supported:
Note also that each file is logged during execution, as opposed to after, so it Note also that each file is logged during execution, as opposed to after, so it
is most useful as a predictor of what SHOULD happen to each file is most useful as a predictor of what SHOULD happen to each file
(which may or may not match what actually DID). (which may or may not match what actually DID.)
``` ```
rclone copy source:path dest:path [flags] rclone copy source:path dest:path [flags]
@@ -149,7 +140,7 @@ rclone copy source:path dest:path [flags]
--missing-on-dst string Report all files missing from the destination to this file --missing-on-dst string Report all files missing from the destination to this file
--missing-on-src string Report all files missing from the source to this file --missing-on-src string Report all files missing from the source to this file
-s, --separator string Separator for the items in the format (default ";") -s, --separator string Separator for the items in the format (default ";")
-t, --timeformat string Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05) -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
``` ```
Options shared with other commands are described next. Options shared with other commands are described next.
@@ -159,7 +150,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for anything which can copy a file Flags for anything which can copy a file
```text ```
--check-first Do all the checks before starting transfers --check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only) -c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison --compare-dest stringArray Include additional server-side paths during comparison
@@ -200,7 +191,7 @@ Flags for anything which can copy a file
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -210,7 +201,7 @@ Important flags useful for most commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -240,17 +231,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -19,40 +19,33 @@ name. If the source is a directory then it acts exactly like the
So So
```console rclone copyto src dst
rclone copyto src dst
```
where src and dst are rclone paths, either `remote:path` or where src and dst are rclone paths, either remote:path or
`/path/to/local` or `C:\windows\path\if\on\windows`. /path/to/local or C:\windows\path\if\on\windows.
This will: This will:
```text if src is file
if src is file copy it to dst, overwriting an existing file if it exists
copy it to dst, overwriting an existing file if it exists if src is directory
if src is directory copy it to dst, overwriting existing files if they exist
copy it to dst, overwriting existing files if they exist see copy command for full details
see copy command for full details
```
This doesn't transfer files that are identical on src and dst, testing This doesn't transfer files that are identical on src and dst, testing
by size and modification time or MD5SUM. It doesn't delete files from by size and modification time or MD5SUM. It doesn't delete files from
the destination. the destination.
*If you are looking to copy just a byte range of a file, please see *If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
`rclone cat --offset X --count Y`.*
**Note**: Use the `-P`/`--progress` flag to view **Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
real-time transfer statistics.
## Logger Flags # Logger Flags
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` flags write paths,
flags write paths, one per line, to the file name (or stdout if it is `-`) one per line, to the file name (or stdout if it is `-`) supplied. What they write is described
supplied. What they write is described in the help below. For example in the help below. For example `--differ` will write all paths which are present
`--differ` will write all paths which are present on both the source and on both the source and destination but different.
destination but different.
The `--combined` flag will write a file (or stdout) which contains all The `--combined` flag will write a file (or stdout) which contains all
file paths with a symbol and then a space and then the path to tell file paths with a symbol and then a space and then the path to tell
@@ -85,7 +78,9 @@ are not currently supported:
Note also that each file is logged during execution, as opposed to after, so it Note also that each file is logged during execution, as opposed to after, so it
is most useful as a predictor of what SHOULD happen to each file is most useful as a predictor of what SHOULD happen to each file
(which may or may not match what actually DID). (which may or may not match what actually DID.)
``` ```
rclone copyto source:path dest:path [flags] rclone copyto source:path dest:path [flags]
@@ -110,7 +105,7 @@ rclone copyto source:path dest:path [flags]
--missing-on-dst string Report all files missing from the destination to this file --missing-on-dst string Report all files missing from the destination to this file
--missing-on-src string Report all files missing from the source to this file --missing-on-src string Report all files missing from the source to this file
-s, --separator string Separator for the items in the format (default ";") -s, --separator string Separator for the items in the format (default ";")
-t, --timeformat string Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05) -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
``` ```
Options shared with other commands are described next. Options shared with other commands are described next.
@@ -120,7 +115,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for anything which can copy a file Flags for anything which can copy a file
```text ```
--check-first Do all the checks before starting transfers --check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only) -c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison --compare-dest stringArray Include additional server-side paths during comparison
@@ -161,7 +156,7 @@ Flags for anything which can copy a file
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -171,7 +166,7 @@ Important flags useful for most commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -201,17 +196,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -22,23 +22,12 @@ set in HTTP headers, it will be used instead of the name from the URL.
With `--print-filename` in addition, the resulting file name will be With `--print-filename` in addition, the resulting file name will be
printed. printed.
Setting `--no-clobber` will prevent overwriting file on the Setting `--no-clobber` will prevent overwriting file on the
destination if there is one with the same name. destination if there is one with the same name.
Setting `--stdout` or making the output file name `-` Setting `--stdout` or making the output file name `-`
will cause the output to be written to standard output. will cause the output to be written to standard output.
Setting `--urls` allows you to input a CSV file of URLs in format: URL,
FILENAME. If `--urls` is in use then replace the URL in the arguments with the
file containing the URLs, e.g.:
```sh
rclone copyurl --urls myurls.csv remote:dir
```
Missing filenames will be autogenerated equivalent to using `--auto-filename`.
Note that `--stdout` and `--print-filename` are incompatible with `--urls`.
This will do `--transfers` copies in parallel. Note that if `--auto-filename`
is desired for all URLs then a file with only URLs and no filename can be used.
## Troubleshooting ## Troubleshooting
If you can't get `rclone copyurl` to work then here are some things you can try: If you can't get `rclone copyurl` to work then here are some things you can try:
@@ -49,6 +38,8 @@ If you can't get `rclone copyurl` to work then here are some things you can try:
- `--user agent curl` - some sites have whitelists for curl's user-agent - try that - `--user agent curl` - some sites have whitelists for curl's user-agent - try that
- Make sure the site works with `curl` directly - Make sure the site works with `curl` directly
``` ```
rclone copyurl https://example.com dest:path [flags] rclone copyurl https://example.com dest:path [flags]
``` ```
@@ -62,7 +53,6 @@ rclone copyurl https://example.com dest:path [flags]
--no-clobber Prevent overwriting file with same name --no-clobber Prevent overwriting file with same name
-p, --print-filename Print the resulting name from --auto-filename -p, --print-filename Print the resulting name from --auto-filename
--stdout Write the output to stdout rather than a file --stdout Write the output to stdout rather than a file
--urls Use a CSV file of links to process multiple URLs
``` ```
Options shared with other commands are described next. Options shared with other commands are described next.
@@ -72,7 +62,7 @@ See the [global flags page](/flags/) for global options not listed here.
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -80,10 +70,5 @@ Important flags useful for most commands
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -10,7 +10,7 @@ Cryptcheck checks the integrity of an encrypted remote.
## Synopsis ## Synopsis
Checks a remote against an [encrypted](/crypt/) remote. This is the equivalent Checks a remote against a [crypted](/crypt/) remote. This is the equivalent
of running rclone [check](/commands/rclone_check/), but able to check the of running rclone [check](/commands/rclone_check/), but able to check the
checksums of the encrypted remote. checksums of the encrypted remote.
@@ -24,18 +24,14 @@ checksum of the file it has just encrypted.
Use it like this Use it like this
```console rclone cryptcheck /path/to/files encryptedremote:path
rclone cryptcheck /path/to/files encryptedremote:path
```
You can use it like this also, but that will involve downloading all You can use it like this also, but that will involve downloading all
the files in `remote:path`. the files in remote:path.
```console rclone cryptcheck remote:path encryptedremote:path
rclone cryptcheck remote:path encryptedremote:path
```
After it has run it will log the status of the `encryptedremote:`. After it has run it will log the status of the encryptedremote:.
If you supply the `--one-way` flag, it will only check that files in If you supply the `--one-way` flag, it will only check that files in
the source match the files in the destination, not the other way the source match the files in the destination, not the other way
@@ -61,6 +57,7 @@ you what happened to it. These are reminiscent of diff files.
The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int) The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information. option for more information.
``` ```
rclone cryptcheck remote:path cryptedremote:path [flags] rclone cryptcheck remote:path cryptedremote:path [flags]
``` ```
@@ -85,7 +82,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags used for check commands Flags used for check commands
```text ```
--max-backlog int Maximum number of objects in sync or check backlog (default 10000) --max-backlog int Maximum number of objects in sync or check backlog (default 10000)
``` ```
@@ -93,7 +90,7 @@ Flags used for check commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -123,17 +120,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -17,13 +17,13 @@ If you supply the `--reverse` flag, it will return encrypted file names.
use it like this use it like this
```console rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
rclone cryptdecode --reverse encryptedremote: filename1 filename2 rclone cryptdecode --reverse encryptedremote: filename1 filename2
```
Another way to accomplish this is by using the `rclone backend encode` (or `decode`) command.
See the documentation on the [crypt](/crypt/) overlay for more info.
Another way to accomplish this is by using the `rclone backend encode` (or `decode`)
command. See the documentation on the [crypt](/crypt/) overlay for more info.
``` ```
rclone cryptdecode encryptedremote: encryptedfilename [flags] rclone cryptdecode encryptedremote: encryptedfilename [flags]
@@ -40,10 +40,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -30,15 +30,14 @@ directories have been merged.
Next, if deduping by name, for every group of duplicate file names / Next, if deduping by name, for every group of duplicate file names /
hashes, it will delete all but one identical file it finds without hashes, it will delete all but one identical file it finds without
confirmation. This means that for most duplicated files the confirmation. This means that for most duplicated files the `dedupe` command will not be interactive.
`dedupe` command will not be interactive.
`dedupe` considers files to be identical if they have the `dedupe` considers files to be identical if they have the
same file path and the same hash. If the backend does not support same file path and the same hash. If the backend does not support hashes (e.g. crypt wrapping
hashes (e.g. crypt wrapping Google Drive) then they will never be found Google Drive) then they will never be found to be identical. If you
to be identical. If you use the `--size-only` flag then files use the `--size-only` flag then files will be considered
will be considered identical if they have the same size (any hash will be identical if they have the same size (any hash will be ignored). This
ignored). This can be useful on crypt backends which do not support hashes. can be useful on crypt backends which do not support hashes.
Next rclone will resolve the remaining duplicates. Exactly which Next rclone will resolve the remaining duplicates. Exactly which
action is taken depends on the dedupe mode. By default, rclone will action is taken depends on the dedupe mode. By default, rclone will
@@ -51,82 +50,71 @@ Here is an example run.
Before - with duplicates Before - with duplicates
```console $ rclone lsl drive:dupes
$ rclone lsl drive:dupes 6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:16.798000000 one.txt 6048320 2016-03-05 16:23:11.775000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt 564374 2016-03-05 16:23:06.731000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt 6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt 6048320 2016-03-05 16:22:46.185000000 two.txt
6048320 2016-03-05 16:22:46.185000000 two.txt 1744073 2016-03-05 16:22:38.104000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt 564374 2016-03-05 16:22:52.118000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
```
Now the `dedupe` session Now the `dedupe` session
```console $ rclone dedupe drive:dupes
$ rclone dedupe drive:dupes 2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode. one.txt: Found 4 files with duplicate names
one.txt: Found 4 files with duplicate names one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36") one.txt: 2 duplicates remain
one.txt: 2 duplicates remain 1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36 2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81 s) Skip and do nothing
s) Skip and do nothing k) Keep just one (choose which in next step)
k) Keep just one (choose which in next step) r) Rename all to be different (by changing file.jpg to file-1.jpg)
r) Rename all to be different (by changing file.jpg to file-1.jpg) s/k/r> k
s/k/r> k Enter the number of the file to keep> 1
Enter the number of the file to keep> 1 one.txt: Deleted 1 extra copies
one.txt: Deleted 1 extra copies two.txt: Found 3 files with duplicate names
two.txt: Found 3 files with duplicate names two.txt: 3 duplicates remain
two.txt: 3 duplicates remain 1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81 2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36 3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802 s) Skip and do nothing
s) Skip and do nothing k) Keep just one (choose which in next step)
k) Keep just one (choose which in next step) r) Rename all to be different (by changing file.jpg to file-1.jpg)
r) Rename all to be different (by changing file.jpg to file-1.jpg) s/k/r> r
s/k/r> r two-1.txt: renamed from: two.txt
two-1.txt: renamed from: two.txt two-2.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt two-3.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
```
The result being The result being
```console $ rclone lsl drive:dupes
$ rclone lsl drive:dupes 6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:16.798000000 one.txt 564374 2016-03-05 16:22:52.118000000 two-1.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt 6048320 2016-03-05 16:22:46.185000000 two-2.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt 1744073 2016-03-05 16:22:38.104000000 two-3.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
```
Dedupe can be run non interactively using the `--dedupe-mode` flag Dedupe can be run non interactively using the `--dedupe-mode` flag or by using an extra parameter with the same value
or by using an extra parameter with the same value
- `--dedupe-mode interactive` - interactive as above. * `--dedupe-mode interactive` - interactive as above.
- `--dedupe-mode skip` - removes identical files then skips anything left. * `--dedupe-mode skip` - removes identical files then skips anything left.
- `--dedupe-mode first` - removes identical files then keeps the first one. * `--dedupe-mode first` - removes identical files then keeps the first one.
- `--dedupe-mode newest` - removes identical files then keeps the newest one. * `--dedupe-mode newest` - removes identical files then keeps the newest one.
- `--dedupe-mode oldest` - removes identical files then keeps the oldest one. * `--dedupe-mode oldest` - removes identical files then keeps the oldest one.
- `--dedupe-mode largest` - removes identical files then keeps the largest one. * `--dedupe-mode largest` - removes identical files then keeps the largest one.
- `--dedupe-mode smallest` - removes identical files then keeps the smallest one. * `--dedupe-mode smallest` - removes identical files then keeps the smallest one.
- `--dedupe-mode rename` - removes identical files then renames the rest to be different. * `--dedupe-mode rename` - removes identical files then renames the rest to be different.
- `--dedupe-mode list` - lists duplicate dirs and files only and changes nothing. * `--dedupe-mode list` - lists duplicate dirs and files only and changes nothing.
For example, to rename all the identically named photos in your Google Photos For example, to rename all the identically named photos in your Google Photos directory, do
directory, do
```console rclone dedupe --dedupe-mode rename "drive:Google Photos"
rclone dedupe --dedupe-mode rename "drive:Google Photos"
```
Or Or
```console rclone dedupe rename "drive:Google Photos"
rclone dedupe rename "drive:Google Photos"
```
``` ```
rclone dedupe [mode] remote:path [flags] rclone dedupe [mode] remote:path [flags]
@@ -147,7 +135,7 @@ See the [global flags page](/flags/) for global options not listed here.
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -155,10 +143,5 @@ Important flags useful for most commands
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -17,23 +17,19 @@ obeys include/exclude filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use alone. If you want to delete a directory and all of its contents use
the [purge](/commands/rclone_purge/) command. the [purge](/commands/rclone_purge/) command.
If you supply the `--rmdirs` flag, it will remove all empty directories along If you supply the `--rmdirs` flag, it will remove all empty directories along with it.
with it. You can also use the separate command [rmdir](/commands/rclone_rmdir/) You can also use the separate command [rmdir](/commands/rclone_rmdir/) or
or [rmdirs](/commands/rclone_rmdirs/) to delete empty directories only. [rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.
For example, to delete all files bigger than 100 MiB, you may first want to For example, to delete all files bigger than 100 MiB, you may first want to
check what would be deleted (use either): check what would be deleted (use either):
```sh rclone --min-size 100M lsl remote:path
rclone --min-size 100M lsl remote:path rclone --dry-run --min-size 100M delete remote:path
rclone --dry-run --min-size 100M delete remote:path
```
Then proceed with the actual delete: Then proceed with the actual delete:
```sh rclone --min-size 100M delete remote:path
rclone --min-size 100M delete remote:path
```
That reads "delete everything with a minimum size of 100 MiB", hence That reads "delete everything with a minimum size of 100 MiB", hence
delete all files bigger than 100 MiB. delete all files bigger than 100 MiB.
@@ -41,6 +37,7 @@ delete all files bigger than 100 MiB.
**Important**: Since this can cause data loss, test first with the **Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag. `--dry-run` or the `--interactive`/`-i` flag.
``` ```
rclone delete remote:path [flags] rclone delete remote:path [flags]
``` ```
@@ -59,7 +56,7 @@ See the [global flags page](/flags/) for global options not listed here.
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -69,7 +66,7 @@ Important flags useful for most commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -99,17 +96,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -11,8 +11,9 @@ Remove a single file from remote.
## Synopsis ## Synopsis
Remove a single file from remote. Unlike `delete` it cannot be used to Remove a single file from remote. Unlike `delete` it cannot be used to
remove a directory and it doesn't obey include/exclude filters - if the remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
specified file exists, it will always be removed. it will always be removed.
``` ```
rclone deletefile remote:path [flags] rclone deletefile remote:path [flags]
@@ -31,7 +32,7 @@ See the [global flags page](/flags/) for global options not listed here.
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -39,10 +40,5 @@ Important flags useful for most commands
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -28,10 +28,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -18,21 +18,19 @@ users.
[git-annex]: https://git-annex.branchable.com/ [git-annex]: https://git-annex.branchable.com/
## Installation on Linux Installation on Linux
---------------------
1. Skip this step if your version of git-annex is [10.20240430] or newer. 1. Skip this step if your version of git-annex is [10.20240430] or newer.
Otherwise, you must create a symlink somewhere on your PATH with a particular Otherwise, you must create a symlink somewhere on your PATH with a particular
name. This symlink helps git-annex tell rclone it wants to run the "gitannex" name. This symlink helps git-annex tell rclone it wants to run the "gitannex"
subcommand. subcommand.
Create the helper symlink in "$HOME/bin": ```sh
# Create the helper symlink in "$HOME/bin".
```console
ln -s "$(realpath rclone)" "$HOME/bin/git-annex-remote-rclone-builtin" ln -s "$(realpath rclone)" "$HOME/bin/git-annex-remote-rclone-builtin"
Verify the new symlink is on your PATH: # Verify the new symlink is on your PATH.
```console
which git-annex-remote-rclone-builtin which git-annex-remote-rclone-builtin
``` ```
@@ -44,15 +42,11 @@ users.
Start by asking git-annex to describe the remote's available configuration Start by asking git-annex to describe the remote's available configuration
parameters. parameters.
If you skipped step 1: ```sh
# If you skipped step 1:
```console
git annex initremote MyRemote type=rclone --whatelse git annex initremote MyRemote type=rclone --whatelse
```
If you created a symlink in step 1: # If you created a symlink in step 1:
```console
git annex initremote MyRemote type=external externaltype=rclone-builtin --whatelse git annex initremote MyRemote type=external externaltype=rclone-builtin --whatelse
``` ```
@@ -68,7 +62,7 @@ users.
be one configured in your rclone.conf file, which can be located with `rclone be one configured in your rclone.conf file, which can be located with `rclone
config file`. config file`.
```console ```sh
git annex initremote MyRemote \ git annex initremote MyRemote \
type=external \ type=external \
externaltype=rclone-builtin \ externaltype=rclone-builtin \
@@ -82,12 +76,13 @@ users.
remote**. This command is very new and has not been tested on many rclone remote**. This command is very new and has not been tested on many rclone
backends. Caveat emptor! backends. Caveat emptor!
```console ```sh
git annex testremote MyRemote git annex testremote MyRemote
``` ```
Happy annexing! Happy annexing!
``` ```
rclone gitannex [flags] rclone gitannex [flags]
``` ```
@@ -102,10 +97,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -29,28 +29,25 @@ as a relative path).
Run without a hash to see the list of all supported hashes, e.g. Run without a hash to see the list of all supported hashes, e.g.
```console $ rclone hashsum
$ rclone hashsum Supported hashes are:
Supported hashes are: * md5
- md5 * sha1
- sha1 * whirlpool
- whirlpool * crc32
- crc32 * sha256
- sha256 * sha512
- sha512 * blake3
- blake3 * xxh3
- xxh3 * xxh128
- xxh128
```
Then Then
```console $ rclone hashsum MD5 remote:path
rclone hashsum MD5 remote:path
```
Note that hash names are case insensitive and values are output in lower case. Note that hash names are case insensitive and values are output in lower case.
``` ```
rclone hashsum [<hash> remote:path] [flags] rclone hashsum [<hash> remote:path] [flags]
``` ```
@@ -72,7 +69,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -102,17 +99,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -12,12 +12,10 @@ Generate public link to file/folder.
Create, retrieve or remove a public link to the given file or folder. Create, retrieve or remove a public link to the given file or folder.
```console rclone link remote:path/to/file
rclone link remote:path/to/file rclone link remote:path/to/folder/
rclone link remote:path/to/folder/ rclone link --unlink remote:path/to/folder/
rclone link --unlink remote:path/to/folder/ rclone link --expire 1d remote:path/to/file
rclone link --expire 1d remote:path/to/file
```
If you supply the --expire flag, it will set the expiration time If you supply the --expire flag, it will set the expiration time
otherwise it will use the default (100 years). **Note** not all otherwise it will use the default (100 years). **Note** not all
@@ -30,9 +28,10 @@ don't will just ignore it.
If successful, the last line of the output will contain the If successful, the last line of the output will contain the
link. Exact capabilities depend on the remote, but the link will link. Exact capabilities depend on the remote, but the link will
always by default be created with the least constraints - e.g. no always by default be created with the least constraints e.g. no
expiry, no password protection, accessible without account. expiry, no password protection, accessible without account.
``` ```
rclone link remote:path [flags] rclone link remote:path [flags]
``` ```
@@ -49,10 +48,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -10,6 +10,7 @@ List all the remotes in the config file and defined in environment variables.
## Synopsis ## Synopsis
Lists all the available remotes from the config file, or the remotes matching Lists all the available remotes from the config file, or the remotes matching
an optional filter. an optional filter.
@@ -23,6 +24,7 @@ Result can be filtered by a filter argument which applies to all attributes,
and/or filter flags specific for each attribute. The values must be specified and/or filter flags specific for each attribute. The values must be specified
according to regular rclone filtering pattern syntax. according to regular rclone filtering pattern syntax.
``` ```
rclone listremotes [<filter>] [flags] rclone listremotes [<filter>] [flags]
``` ```
@@ -44,10 +46,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -12,25 +12,24 @@ List the objects in the path with size and path.
Lists the objects in the source path to standard output in a human Lists the objects in the source path to standard output in a human
readable format with size and path. Recurses by default. readable format with size and path. Recurses by default.
E.g. Eg
$ rclone ls swift:bucket
60295 bevajer5jef
90613 canole
94467 diwogej7
37600 fubuwic
```console
$ rclone ls swift:bucket
60295 bevajer5jef
90613 canole
94467 diwogej7
37600 fubuwic
```
Any of the filtering options can be applied to this command. Any of the filtering options can be applied to this command.
There are several related list commands There are several related list commands
- `ls` to list size and path of objects only * `ls` to list size and path of objects only
- `lsl` to list modification time, size and path of objects only * `lsl` to list modification time, size and path of objects only
- `lsd` to list directories only * `lsd` to list directories only
- `lsf` to list objects and directories in easy to parse format * `lsf` to list objects and directories in easy to parse format
- `lsjson` to list objects and directories in JSON format * `lsjson` to list objects and directories in JSON format
`ls`,`lsl`,`lsd` are designed to be human-readable. `ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable. `lsf` is designed to be human and machine-readable.
@@ -38,13 +37,13 @@ There are several related list commands
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion. Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
use `-R` to make them recurse.
Listing a nonexistent directory will produce an error except for Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs - remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes). the bucket-based remotes).
``` ```
rclone ls remote:path [flags] rclone ls remote:path [flags]
``` ```
@@ -62,7 +61,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -92,17 +91,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -15,34 +15,31 @@ recurse by default. Use the `-R` flag to recurse.
This command lists the total size of the directory (if known, -1 if This command lists the total size of the directory (if known, -1 if
not), the modification time (if known, the current time if not), the not), the modification time (if known, the current time if not), the
number of objects in the directory (if known, -1 if not) and the name number of objects in the directory (if known, -1 if not) and the name
of the directory, E.g. of the directory, Eg
```console $ rclone lsd swift:
$ rclone lsd swift: 494000 2018-04-26 08:43:20 10000 10000files
494000 2018-04-26 08:43:20 10000 10000files 65 2018-04-26 08:43:20 1 1File
65 2018-04-26 08:43:20 1 1File
```
Or Or
```console $ rclone lsd drive:test
$ rclone lsd drive:test -1 2016-10-17 17:41:53 -1 1000files
-1 2016-10-17 17:41:53 -1 1000files -1 2017-01-03 14:40:54 -1 2500files
-1 2017-01-03 14:40:54 -1 2500files -1 2017-07-08 14:39:28 -1 4000files
-1 2017-07-08 14:39:28 -1 4000files
```
If you just want the directory names use `rclone lsf --dirs-only`. If you just want the directory names use `rclone lsf --dirs-only`.
Any of the filtering options can be applied to this command. Any of the filtering options can be applied to this command.
There are several related list commands There are several related list commands
- `ls` to list size and path of objects only * `ls` to list size and path of objects only
- `lsl` to list modification time, size and path of objects only * `lsl` to list modification time, size and path of objects only
- `lsd` to list directories only * `lsd` to list directories only
- `lsf` to list objects and directories in easy to parse format * `lsf` to list objects and directories in easy to parse format
- `lsjson` to list objects and directories in JSON format * `lsjson` to list objects and directories in JSON format
`ls`,`lsl`,`lsd` are designed to be human-readable. `ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable. `lsf` is designed to be human and machine-readable.
@@ -50,13 +47,13 @@ There are several related list commands
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion. Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
use `-R` to make them recurse.
Listing a nonexistent directory will produce an error except for Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs - remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes). the bucket-based remotes).
``` ```
rclone lsd remote:path [flags] rclone lsd remote:path [flags]
``` ```
@@ -75,7 +72,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -105,17 +102,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -15,47 +15,41 @@ standard output in a form which is easy to parse by scripts. By
default this will just be the names of the objects and directories, default this will just be the names of the objects and directories,
one per line. The directories will have a / suffix. one per line. The directories will have a / suffix.
E.g. Eg
```console $ rclone lsf swift:bucket
$ rclone lsf swift:bucket bevajer5jef
bevajer5jef canole
canole diwogej7
diwogej7 ferejej3gux/
ferejej3gux/ fubuwic
fubuwic
```
Use the `--format` option to control what gets listed. By default this Use the `--format` option to control what gets listed. By default this
is just the path, but you can use these parameters to control the is just the path, but you can use these parameters to control the
output: output:
```text p - path
p - path s - size
s - size t - modification time
t - modification time h - hash
h - hash i - ID of object
i - ID of object o - Original ID of underlying object
o - Original ID of underlying object m - MimeType of object if known
m - MimeType of object if known e - encrypted name
e - encrypted name T - tier of storage if known, e.g. "Hot" or "Cool"
T - tier of storage if known, e.g. "Hot" or "Cool" M - Metadata of object in JSON blob format, eg {"key":"value"}
M - Metadata of object in JSON blob format, eg {"key":"value"}
```
So if you wanted the path, size and modification time, you would use So if you wanted the path, size and modification time, you would use
`--format "pst"`, or maybe `--format "tsp"` to put the path last. `--format "pst"`, or maybe `--format "tsp"` to put the path last.
E.g. Eg
```console $ rclone lsf --format "tsp" swift:bucket
$ rclone lsf --format "tsp" swift:bucket 2016-06-25 18:55:41;60295;bevajer5jef
2016-06-25 18:55:41;60295;bevajer5jef 2016-06-25 18:55:43;90613;canole
2016-06-25 18:55:43;90613;canole 2016-06-25 18:55:43;94467;diwogej7
2016-06-25 18:55:43;94467;diwogej7 2018-04-26 08:50:45;0;ferejej3gux/
2018-04-26 08:50:45;0;ferejej3gux/ 2016-06-25 18:55:40;37600;fubuwic
2016-06-25 18:55:40;37600;fubuwic
```
If you specify "h" in the format you will get the MD5 hash by default, If you specify "h" in the format you will get the MD5 hash by default,
use the `--hash` flag to change which hash you want. Note that this use the `--hash` flag to change which hash you want. Note that this
@@ -66,20 +60,16 @@ type.
For example, to emulate the md5sum command you can use For example, to emulate the md5sum command you can use
```console rclone lsf -R --hash MD5 --format hp --separator " " --files-only .
rclone lsf -R --hash MD5 --format hp --separator " " --files-only .
```
E.g. Eg
```console $ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket
$ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket 7908e352297f0f530b84a756f188baa3 bevajer5jef
7908e352297f0f530b84a756f188baa3 bevajer5jef cd65ac234e6fea5925974a51cdd865cc canole
cd65ac234e6fea5925974a51cdd865cc canole 03b5341b4f234b9d984d03ad076bae91 diwogej7
03b5341b4f234b9d984d03ad076bae91 diwogej7 8fd37c3810dd660778137ac3a66cc06d fubuwic
8fd37c3810dd660778137ac3a66cc06d fubuwic 99713e14a4c4ff553acaf1930fad985b gixacuh7ku
99713e14a4c4ff553acaf1930fad985b gixacuh7ku
```
(Though "rclone md5sum ." is an easier way of typing this.) (Though "rclone md5sum ." is an easier way of typing this.)
@@ -87,28 +77,24 @@ By default the separator is ";" this can be changed with the
`--separator` flag. Note that separators aren't escaped in the path so `--separator` flag. Note that separators aren't escaped in the path so
putting it last is a good strategy. putting it last is a good strategy.
E.g. Eg
```console $ rclone lsf --separator "," --format "tshp" swift:bucket
$ rclone lsf --separator "," --format "tshp" swift:bucket 2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef 2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole 2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7
2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7 2018-04-26 08:52:53,0,,ferejej3gux/
2018-04-26 08:52:53,0,,ferejej3gux/ 2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
```
You can output in CSV standard format. This will escape things in " You can output in CSV standard format. This will escape things in "
if they contain, if they contain ,
E.g. Eg
```console $ rclone lsf --csv --files-only --format ps remote:path
$ rclone lsf --csv --files-only --format ps remote:path test.log,22355
test.log,22355 test.sh,449
test.sh,449 "this file contains a comma, in the file name.txt",6
"this file contains a comma, in the file name.txt",6
```
Note that the `--absolute` parameter is useful for making lists of files Note that the `--absolute` parameter is useful for making lists of files
to pass to an rclone copy with the `--files-from-raw` flag. to pass to an rclone copy with the `--files-from-raw` flag.
@@ -116,38 +102,32 @@ to pass to an rclone copy with the `--files-from-raw` flag.
For example, to find all the files modified within one day and copy For example, to find all the files modified within one day and copy
those only (without traversing the whole directory structure): those only (without traversing the whole directory structure):
```console rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files rclone copy --files-from-raw new_files /path/to/local remote:path
rclone copy --files-from-raw new_files /path/to/local remote:path
```
The default time format is `'2006-01-02 15:04:05'`. The default time format is `'2006-01-02 15:04:05'`.
[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with [Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with the `--time-format` flag.
the `--time-format` flag. Examples: Examples:
```console rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)' rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000' rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00' rclone lsf remote:path --format pt --time-format RFC3339
rclone lsf remote:path --format pt --time-format RFC3339 rclone lsf remote:path --format pt --time-format DateOnly
rclone lsf remote:path --format pt --time-format DateOnly rclone lsf remote:path --format pt --time-format max
rclone lsf remote:path --format pt --time-format max `--time-format max` will automatically truncate '`2006-01-02 15:04:05.000000000`'
rclone lsf remote:path --format pt --time-format unix
rclone lsf remote:path --format pt --time-format unixnano
```
`--time-format max` will automatically truncate `2006-01-02 15:04:05.000000000`
to the maximum precision supported by the remote. to the maximum precision supported by the remote.
Any of the filtering options can be applied to this command. Any of the filtering options can be applied to this command.
There are several related list commands There are several related list commands
- `ls` to list size and path of objects only * `ls` to list size and path of objects only
- `lsl` to list modification time, size and path of objects only * `lsl` to list modification time, size and path of objects only
- `lsd` to list directories only * `lsd` to list directories only
- `lsf` to list objects and directories in easy to parse format * `lsf` to list objects and directories in easy to parse format
- `lsjson` to list objects and directories in JSON format * `lsjson` to list objects and directories in JSON format
`ls`,`lsl`,`lsd` are designed to be human-readable. `ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable. `lsf` is designed to be human and machine-readable.
@@ -155,13 +135,13 @@ There are several related list commands
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion. Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
use `-R` to make them recurse.
Listing a nonexistent directory will produce an error except for Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs - remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes). the bucket-based remotes).
``` ```
rclone lsf remote:path [flags] rclone lsf remote:path [flags]
``` ```
@@ -179,7 +159,7 @@ rclone lsf remote:path [flags]
-h, --help help for lsf -h, --help help for lsf
-R, --recursive Recurse into the listing -R, --recursive Recurse into the listing
-s, --separator string Separator for the items in the format (default ";") -s, --separator string Separator for the items in the format (default ";")
-t, --time-format string Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05) -t, --time-format string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
``` ```
Options shared with other commands are described next. Options shared with other commands are described next.
@@ -189,7 +169,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -219,17 +199,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -14,27 +14,25 @@ List directories and objects in the path in JSON format.
The output is an array of Items, where each Item looks like this: The output is an array of Items, where each Item looks like this:
```json {
{ "Hashes" : {
"Hashes" : { "SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
"SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f", "MD5" : "b1946ac92492d2347c6235b4d2611184",
"MD5" : "b1946ac92492d2347c6235b4d2611184", "DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc" },
}, "ID": "y2djkhiujf83u33",
"ID": "y2djkhiujf83u33", "OrigID": "UYOJVTUW00Q1RzTDA",
"OrigID": "UYOJVTUW00Q1RzTDA", "IsBucket" : false,
"IsBucket" : false, "IsDir" : false,
"IsDir" : false, "MimeType" : "application/octet-stream",
"MimeType" : "application/octet-stream", "ModTime" : "2017-05-31T16:15:57.034468261+01:00",
"ModTime" : "2017-05-31T16:15:57.034468261+01:00", "Name" : "file.txt",
"Name" : "file.txt", "Encrypted" : "v0qpsdq8anpci8n929v3uu9338",
"Encrypted" : "v0qpsdq8anpci8n929v3uu9338", "EncryptedPath" : "kja9098349023498/v0qpsdq8anpci8n929v3uu9338",
"EncryptedPath" : "kja9098349023498/v0qpsdq8anpci8n929v3uu9338", "Path" : "full/path/goes/here/file.txt",
"Path" : "full/path/goes/here/file.txt", "Size" : 6,
"Size" : 6, "Tier" : "hot",
"Tier" : "hot", }
}
```
The exact set of properties included depends on the backend: The exact set of properties included depends on the backend:
@@ -96,11 +94,11 @@ Any of the filtering options can be applied to this command.
There are several related list commands There are several related list commands
- `ls` to list size and path of objects only * `ls` to list size and path of objects only
- `lsl` to list modification time, size and path of objects only * `lsl` to list modification time, size and path of objects only
- `lsd` to list directories only * `lsd` to list directories only
- `lsf` to list objects and directories in easy to parse format * `lsf` to list objects and directories in easy to parse format
- `lsjson` to list objects and directories in JSON format * `lsjson` to list objects and directories in JSON format
`ls`,`lsl`,`lsd` are designed to be human-readable. `ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable. `lsf` is designed to be human and machine-readable.
@@ -108,13 +106,13 @@ There are several related list commands
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion. Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
use `-R` to make them recurse.
Listing a nonexistent directory will produce an error except for Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs - remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes). the bucket-based remotes).
``` ```
rclone lsjson remote:path [flags] rclone lsjson remote:path [flags]
``` ```
@@ -143,7 +141,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -173,17 +171,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -13,25 +13,24 @@ List the objects in path with modification time, size and path.
Lists the objects in the source path to standard output in a human Lists the objects in the source path to standard output in a human
readable format with modification time, size and path. Recurses by default. readable format with modification time, size and path. Recurses by default.
E.g. Eg
$ rclone lsl swift:bucket
60295 2016-06-25 18:55:41.062626927 bevajer5jef
90613 2016-06-25 18:55:43.302607074 canole
94467 2016-06-25 18:55:43.046609333 diwogej7
37600 2016-06-25 18:55:40.814629136 fubuwic
```console
$ rclone lsl swift:bucket
60295 2016-06-25 18:55:41.062626927 bevajer5jef
90613 2016-06-25 18:55:43.302607074 canole
94467 2016-06-25 18:55:43.046609333 diwogej7
37600 2016-06-25 18:55:40.814629136 fubuwic
```
Any of the filtering options can be applied to this command. Any of the filtering options can be applied to this command.
There are several related list commands There are several related list commands
- `ls` to list size and path of objects only * `ls` to list size and path of objects only
- `lsl` to list modification time, size and path of objects only * `lsl` to list modification time, size and path of objects only
- `lsd` to list directories only * `lsd` to list directories only
- `lsf` to list objects and directories in easy to parse format * `lsf` to list objects and directories in easy to parse format
- `lsjson` to list objects and directories in JSON format * `lsjson` to list objects and directories in JSON format
`ls`,`lsl`,`lsd` are designed to be human-readable. `ls`,`lsl`,`lsd` are designed to be human-readable.
`lsf` is designed to be human and machine-readable. `lsf` is designed to be human and machine-readable.
@@ -39,13 +38,13 @@ There are several related list commands
Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion. Note that `ls` and `lsl` recurse by default - use `--max-depth 1` to stop the recursion.
The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - The other list commands `lsd`,`lsf`,`lsjson` do not recurse by default - use `-R` to make them recurse.
use `-R` to make them recurse.
Listing a nonexistent directory will produce an error except for Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs - remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes). the bucket-based remotes).
``` ```
rclone lsl remote:path [flags] rclone lsl remote:path [flags]
``` ```
@@ -63,7 +62,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -93,17 +92,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -27,6 +27,7 @@ by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally, when there is data to read (if not, the hyphen will be treated literally,
as a relative path). as a relative path).
``` ```
rclone md5sum remote:path [flags] rclone md5sum remote:path [flags]
``` ```
@@ -48,7 +49,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -78,17 +79,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -24,7 +24,7 @@ See the [global flags page](/flags/) for global options not listed here.
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -32,10 +32,5 @@ Important flags useful for most commands
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -13,7 +13,7 @@ Mount the remote as file system on a mountpoint.
Rclone mount allows Linux, FreeBSD, macOS and Windows to Rclone mount allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with FUSE. mount any of Rclone's cloud storage systems as a file system with FUSE.
First set up your remote using `rclone config`. Check it works with `rclone ls` etc. First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
On Linux and macOS, you can run mount in either foreground or background (aka On Linux and macOS, you can run mount in either foreground or background (aka
daemon) mode. Mount runs in foreground mode by default. Use the `--daemon` flag daemon) mode. Mount runs in foreground mode by default. Use the `--daemon` flag
@@ -28,9 +28,7 @@ mount, waits until success or timeout and exits with appropriate code
On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount` On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount`
is an **empty** **existing** directory: is an **empty** **existing** directory:
```console rclone mount remote:path/to/files /path/to/local/mount
rclone mount remote:path/to/files /path/to/local/mount
```
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows) On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. If foreground mount is used interactively from a console window, for details. If foreground mount is used interactively from a console window,
@@ -40,30 +38,26 @@ used to work with the mount until rclone is interrupted e.g. by pressing Ctrl-C.
The following examples will mount to an automatically assigned drive, The following examples will mount to an automatically assigned drive,
to specific drive letter `X:`, to path `C:\path\parent\mount` to specific drive letter `X:`, to path `C:\path\parent\mount`
(where parent directory or drive must exist, and mount must **not** exist, (where parent directory or drive must exist, and mount must **not** exist,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
and the last example will mount as network share `\\cloud\remote` and map it to an the last example will mount as network share `\\cloud\remote` and map it to an
automatically assigned drive: automatically assigned drive:
```console rclone mount remote:path/to/files *
rclone mount remote:path/to/files * rclone mount remote:path/to/files X:
rclone mount remote:path/to/files X: rclone mount remote:path/to/files C:\path\parent\mount
rclone mount remote:path/to/files C:\path\parent\mount rclone mount remote:path/to/files \\cloud\remote
rclone mount remote:path/to/files \\cloud\remote
```
When the program ends while in foreground mode, either via Ctrl+C or receiving When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount should be automatically stopped. a SIGINT or SIGTERM signal, the mount should be automatically stopped.
When running in background mode the user will have to stop the mount manually: When running in background mode the user will have to stop the mount manually:
```console # Linux
# Linux fusermount -u /path/to/local/mount
fusermount -u /path/to/local/mount #... or on some systems
#... or on some systems fusermount3 -u /path/to/local/mount
fusermount3 -u /path/to/local/mount # OS X or Linux when using nfsmount
# OS X or Linux when using nfsmount umount /path/to/local/mount
umount /path/to/local/mount
```
The umount operation can fail, for example when the mountpoint is busy. The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually. When that happens, it is the user's responsibility to stop the mount manually.
@@ -77,7 +71,7 @@ at all, then 1 PiB is set as both the total and the free size.
## Installing on Windows ## Installing on Windows
To run `rclone mount on Windows`, you will need to To run rclone mount on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/). download and install [WinFsp](http://www.secfs.net/winfsp/).
[WinFsp](https://github.com/winfsp/winfsp) is an open-source [WinFsp](https://github.com/winfsp/winfsp) is an open-source
@@ -98,22 +92,20 @@ thumbnails for image and video files on network drives.
In most cases, rclone will mount the remote as a normal, fixed disk drive by default. In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
However, you can also choose to mount it as a remote network drive, often described However, you can also choose to mount it as a remote network drive, often described
as a network share. If you mount an rclone remote using the default, fixed drive as a network share. If you mount an rclone remote using the default, fixed drive mode
mode and experience unexpected program errors, freezes or other issues, consider and experience unexpected program errors, freezes or other issues, consider mounting
mounting as a network drive instead. as a network drive instead.
When mounting as a fixed disk drive you can either mount to an unused drive letter, When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **nonexistent** subdirectory of an **existing** parent or to a path representing a **nonexistent** subdirectory of an **existing** parent
directory or drive. Using the special value `*` will tell rclone to directory or drive. Using the special value `*` will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving automatically assign the next available drive letter, starting with Z: and moving backward.
backward. Examples: Examples:
```console rclone mount remote:path/to/files *
rclone mount remote:path/to/files * rclone mount remote:path/to/files X:
rclone mount remote:path/to/files X: rclone mount remote:path/to/files C:\path\parent\mount
rclone mount remote:path/to/files C:\path\parent\mount rclone mount remote:path/to/files X:
rclone mount remote:path/to/files X:
```
Option `--volname` can be used to set a custom volume name for the mounted Option `--volname` can be used to set a custom volume name for the mounted
file system. The default is to use the remote name and path. file system. The default is to use the remote name and path.
@@ -123,28 +115,24 @@ to your mount command. Mounting to a directory path is not supported in
this mode, it is a limitation Windows imposes on junctions, so the remote must always this mode, it is a limitation Windows imposes on junctions, so the remote must always
be mounted to a drive letter. be mounted to a drive letter.
```console rclone mount remote:path/to/files X: --network-mode
rclone mount remote:path/to/files X: --network-mode
```
A volume name specified with `--volname` will be used to create the network share A volume name specified with `--volname` will be used to create the network share path.
path. A complete UNC path, such as `\\cloud\remote`, optionally with path A complete UNC path, such as `\\cloud\remote`, optionally with path
`\\cloud\remote\madeup\path`, will be used as is. Any other `\\cloud\remote\madeup\path`, will be used as is. Any other
string will be used as the share part, after a default prefix `\\server\`. string will be used as the share part, after a default prefix `\\server\`.
If no volume name is specified then `\\server\share` will be used. If no volume name is specified then `\\server\share` will be used.
You must make sure the volume name is unique when you are mounting more than one You must make sure the volume name is unique when you are mounting more than one drive,
drive, or else the mount command will fail. The share name will treated as the or else the mount command will fail. The share name will treated as the volume label for
volume label for the mapped drive, shown in Windows Explorer etc, while the complete the mapped drive, shown in Windows Explorer etc, while the complete
`\\server\share` will be reported as the remote UNC path by `\\server\share` will be reported as the remote UNC path by
`net use` etc, just like a normal network drive mapping. `net use` etc, just like a normal network drive mapping.
If you specify a full network share UNC path with `--volname`, this will implicitly If you specify a full network share UNC path with `--volname`, this will implicitly
set the `--network-mode` option, so the following two examples have same result: set the `--network-mode` option, so the following two examples have same result:
```console rclone mount remote:path/to/files X: --network-mode
rclone mount remote:path/to/files X: --network-mode rclone mount remote:path/to/files X: --volname \\server\share
rclone mount remote:path/to/files X: --volname \\server\share
```
You may also specify the network share UNC path as the mountpoint itself. Then rclone You may also specify the network share UNC path as the mountpoint itself. Then rclone
will automatically assign a drive letter, same as with `*` and use that as will automatically assign a drive letter, same as with `*` and use that as
@@ -152,16 +140,15 @@ mountpoint, and instead use the UNC path specified as the volume name, as if it
specified with the `--volname` option. This will also implicitly set specified with the `--volname` option. This will also implicitly set
the `--network-mode` option. This means the following two examples have same result: the `--network-mode` option. This means the following two examples have same result:
```console rclone mount remote:path/to/files \\cloud\remote
rclone mount remote:path/to/files \\cloud\remote rclone mount remote:path/to/files * --volname \\cloud\remote
rclone mount remote:path/to/files * --volname \\cloud\remote
```
There is yet another way to enable network mode, and to set the share path, There is yet another way to enable network mode, and to set the share path,
and that is to pass the "native" libfuse/WinFsp option directly: and that is to pass the "native" libfuse/WinFsp option directly:
`--fuse-flag --VolumePrefix=\server\share`. Note that the path `--fuse-flag --VolumePrefix=\server\share`. Note that the path
must be with just a single backslash prefix in this case. must be with just a single backslash prefix in this case.
*Note:* In previous versions of rclone this was the only supported method. *Note:* In previous versions of rclone this was the only supported method.
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping) [Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
@@ -174,11 +161,11 @@ The FUSE emulation layer on Windows must convert between the POSIX-based
permission model used in FUSE, and the permission model used in Windows, permission model used in FUSE, and the permission model used in Windows,
based on access-control lists (ACL). based on access-control lists (ACL).
The mounted filesystem will normally get three entries in its access-control list The mounted filesystem will normally get three entries in its access-control list (ACL),
(ACL), representing permissions for the POSIX permission scopes: Owner, group and representing permissions for the POSIX permission scopes: Owner, group and others.
others. By default, the owner and group will be taken from the current user, and By default, the owner and group will be taken from the current user, and the built-in
the built-in group "Everyone" will be used to represent others. The user/group can group "Everyone" will be used to represent others. The user/group can be customized
be customized with FUSE options "UserName" and "GroupName", with FUSE options "UserName" and "GroupName",
e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`. e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`.
The permissions on each entry will be set according to [options](#options) The permissions on each entry will be set according to [options](#options)
`--dir-perms` and `--file-perms`, which takes a value in traditional Unix `--dir-perms` and `--file-perms`, which takes a value in traditional Unix
@@ -278,74 +265,58 @@ does not suffer from the same limitations.
## Mounting on macOS ## Mounting on macOS
Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/), Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/), [macFUSE](https://osxfuse.github.io/)
[macFUSE](https://osxfuse.github.io/) (also known as osxfuse) or (also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
[FUSE-T](https://www.fuse-t.org/).macFUSE is a traditional FUSE driver utilizing FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
a macOS kernel extension (kext). FUSE-T is an alternative FUSE system which which "mounts" via an NFSv4 local server.
"mounts" via an NFSv4 local server.
### Unicode Normalization #### Unicode Normalization
It is highly recommended to keep the default of `--no-unicode-normalization=false` It is highly recommended to keep the default of `--no-unicode-normalization=false`
for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity). for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity).
### NFS mount ### NFS mount
This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/) This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/) command and mounts
command and mounts it to the specified mountpoint. If you run this in background it to the specified mountpoint. If you run this in background mode using |--daemon|, you will need to
mode using |--daemon|, you will need to send SIGTERM signal to the rclone process send SIGTERM signal to the rclone process using |kill| command to stop the mount.
using |kill| command to stop the mount.
Note that `--nfs-cache-handle-limit` controls the maximum number of cached file Note that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the `nfsmount` caching handler.
handles stored by the `nfsmount` caching handler. This should not be set too low This should not be set too low or you may experience errors when trying to access files. The default is 1000000,
or you may experience errors when trying to access files. The default is 1000000,
but consider lowering this limit if the server's system resource usage causes problems. but consider lowering this limit if the server's system resource usage causes problems.
### macFUSE Notes ### macFUSE Notes
If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
from the website, rclone will locate the macFUSE libraries without any further intervention. the website, rclone will locate the macFUSE libraries without any further intervention.
If however, macFUSE is installed using the [macports](https://www.macports.org/) If however, macFUSE is installed using the [macports](https://www.macports.org/) package manager,
package manager, the following addition steps are required. the following addition steps are required.
```console sudo mkdir /usr/local/lib
sudo mkdir /usr/local/lib cd /usr/local/lib
cd /usr/local/lib sudo ln -s /opt/local/lib/libfuse.2.dylib
sudo ln -s /opt/local/lib/libfuse.2.dylib
```
### FUSE-T Limitations, Caveats, and Notes ### FUSE-T Limitations, Caveats, and Notes
There are some limitations, caveats, and notes about how it works. These are There are some limitations, caveats, and notes about how it works. These are current as
current as of FUSE-T version 1.0.14. of FUSE-T version 1.0.14.
#### ModTime update on read #### ModTime update on read
As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats): As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):
> File access and modification times cannot be set separately as it seems to be an > File access and modification times cannot be set separately as it seems to be an
> issue with the NFS client which always modifies both. Can be reproduced with > issue with the NFS client which always modifies both. Can be reproduced with
> 'touch -m' and 'touch -a' commands > 'touch -m' and 'touch -a' commands
This means that viewing files with various tools, notably macOS Finder, will cause This means that viewing files with various tools, notably macOS Finder, will cause rlcone
rlcone to update the modification time of the file. This may make rclone upload a to update the modification time of the file. This may make rclone upload a full new copy
full new copy of the file. of the file.
#### Read Only mounts #### Read Only mounts
When mounting with `--read-only`, attempts to write to files will fail *silently* When mounting with `--read-only`, attempts to write to files will fail *silently* as
as opposed to with a clear warning as in macFUSE. opposed to with a clear warning as in macFUSE.
# Mounting on Linux
On newer versions of Ubuntu, you may encounter the following error when running
`rclone mount`:
> NOTICE: mount helper error: fusermount3: mount failed: Permission denied
> CRITICAL: Fatal error: failed to mount FUSE fs: fusermount: exit status 1
This may be due to newer [Apparmor](https://wiki.ubuntu.com/AppArmor) restrictions,
which can be disabled with `sudo aa-disable /usr/bin/fusermount3` (you may need to
`sudo apt install apparmor-utils` beforehand).
## Limitations ## Limitations
@@ -446,14 +417,12 @@ helper you should symlink rclone binary to `/sbin/mount.rclone` and optionally
rclone will detect it and translate command-line arguments appropriately. rclone will detect it and translate command-line arguments appropriately.
Now you can run classic mounts like this: Now you can run classic mounts like this:
```
```console
mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
``` ```
or create systemd mount units: or create systemd mount units:
```
```ini
# /etc/systemd/system/mnt-data.mount # /etc/systemd/system/mnt-data.mount
[Unit] [Unit]
Description=Mount for /mnt/data Description=Mount for /mnt/data
@@ -465,8 +434,7 @@ Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone
``` ```
optionally accompanied by systemd automount unit optionally accompanied by systemd automount unit
```
```ini
# /etc/systemd/system/mnt-data.automount # /etc/systemd/system/mnt-data.automount
[Unit] [Unit]
Description=AutoMount for /mnt/data Description=AutoMount for /mnt/data
@@ -478,8 +446,7 @@ WantedBy=multi-user.target
``` ```
or add in `/etc/fstab` a line like or add in `/etc/fstab` a line like
```
```console
sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0 sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
``` ```
@@ -528,10 +495,8 @@ directory should be considered up to date and not refreshed from the
backend. Changes made through the VFS will appear immediately or backend. Changes made through the VFS will appear immediately or
invalidate the cache. invalidate the cache.
```text
--dir-cache-time duration Time to cache directory entries for (default 5m0s) --dir-cache-time duration Time to cache directory entries for (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s) --poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
```
However, changes made directly on the cloud storage by the web However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once interface or a different copy of rclone will only be picked up once
@@ -543,22 +508,16 @@ You can send a `SIGHUP` signal to rclone for it to flush all
directory caches, regardless of how old they are. Assuming only one directory caches, regardless of how old they are. Assuming only one
rclone instance is running, you can reset the cache like this: rclone instance is running, you can reset the cache like this:
```console kill -SIGHUP $(pidof rclone)
kill -SIGHUP $(pidof rclone)
```
If you configure rclone with a [remote control](/rc) then you can use If you configure rclone with a [remote control](/rc) then you can use
rclone rc to flush the whole directory cache: rclone rc to flush the whole directory cache:
```console rclone rc vfs/forget
rclone rc vfs/forget
```
Or individual files or directories: Or individual files or directories:
```console rclone rc vfs/forget file=path/to/file dir=path/to/dir
rclone rc vfs/forget file=path/to/file dir=path/to/dir
```
## VFS File Buffering ## VFS File Buffering
@@ -589,7 +548,6 @@ write simultaneously to a file. See below for more details.
Note that the VFS cache is separate from the cache backend and you may Note that the VFS cache is separate from the cache backend and you may
find that you need one or the other or both. find that you need one or the other or both.
```text
--cache-dir string Directory rclone will use for caching. --cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
@@ -597,7 +555,6 @@ find that you need one or the other or both.
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off) --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s) --vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
```
If run with `-vv` rclone will print the location of the file cache. The If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but files are stored in the user cache file area which is OS dependent but
@@ -645,13 +602,13 @@ directly to the remote without caching anything on disk.
This will mean some operations are not possible This will mean some operations are not possible
- Files can't be opened for both read AND write * Files can't be opened for both read AND write
- Files opened for write can't be seeked * Files opened for write can't be seeked
- Existing files opened for write must have O_TRUNC set * Existing files opened for write must have O_TRUNC set
- Files open for read with O_TRUNC will be opened write only * Files open for read with O_TRUNC will be opened write only
- Files open for write only will behave as if O_TRUNC was supplied * Files open for write only will behave as if O_TRUNC was supplied
- Open modes O_APPEND, O_TRUNC are ignored * Open modes O_APPEND, O_TRUNC are ignored
- If an upload fails it can't be retried * If an upload fails it can't be retried
### --vfs-cache-mode minimal ### --vfs-cache-mode minimal
@@ -661,10 +618,10 @@ write will be a lot more compatible, but uses the minimal disk space.
These operations are not possible These operations are not possible
- Files opened for write only can't be seeked * Files opened for write only can't be seeked
- Existing files opened for write must have O_TRUNC set * Existing files opened for write must have O_TRUNC set
- Files opened for write only will ignore O_APPEND, O_TRUNC * Files opened for write only will ignore O_APPEND, O_TRUNC
- If an upload fails it can't be retried * If an upload fails it can't be retried
### --vfs-cache-mode writes ### --vfs-cache-mode writes
@@ -747,11 +704,9 @@ read, at the cost of an increased number of requests.
These flags control the chunking: These flags control the chunking:
```text
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
--vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-chunk-streams int The number of parallel streams to read at once
```
The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter. The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
@@ -765,9 +720,9 @@ value is "off", which is the default, the limit is disabled and the chunk size
will grow indefinitely. will grow indefinitely.
With `--vfs-read-chunk-size 100M` and `--vfs-read-chunk-size-limit 0` With `--vfs-read-chunk-size 100M` and `--vfs-read-chunk-size-limit 0`
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
and so on. When `--vfs-read-chunk-size-limit 500M` is specified, the result would When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
be 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on. 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading. Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
@@ -805,41 +760,32 @@ In particular S3 and Swift benefit hugely from the `--no-modtime` flag
(or use `--use-server-modtime` for a slightly different effect) as each (or use `--use-server-modtime` for a slightly different effect) as each
read of the modification time takes a transaction. read of the modification time takes a transaction.
```text
--no-checksum Don't compare checksums on up/download. --no-checksum Don't compare checksums on up/download.
--no-modtime Don't read/write the modification time (can speed things up). --no-modtime Don't read/write the modification time (can speed things up).
--no-seek Don't allow seeking in files. --no-seek Don't allow seeking in files.
--read-only Only allow read-only access. --read-only Only allow read-only access.
```
Sometimes rclone is delivered reads or writes out of order. Rather Sometimes rclone is delivered reads or writes out of order. Rather
than seeking rclone will wait a short time for the in sequence read or than seeking rclone will wait a short time for the in sequence read or
write to come in. These flags only come into effect when not using an write to come in. These flags only come into effect when not using an
on disk cache file. on disk cache file.
```text
--vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
--vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
```
When using VFS write caching (`--vfs-cache-mode` with value writes or full), When using VFS write caching (`--vfs-cache-mode` with value writes or full),
the global flag `--transfers` can be set to adjust the number of parallel uploads the global flag `--transfers` can be set to adjust the number of parallel uploads of
of modified files from the cache (the related global flag `--checkers` has no modified files from the cache (the related global flag `--checkers` has no effect on the VFS).
effect on the VFS).
```text
--transfers int Number of file transfers to run in parallel (default 4) --transfers int Number of file transfers to run in parallel (default 4)
```
## Symlinks ## Symlinks
By default the VFS does not support symlinks. However this may be By default the VFS does not support symlinks. However this may be
enabled with either of the following flags: enabled with either of the following flags:
```text
--links Translate symlinks to/from regular files with a '.rclonelink' extension. --links Translate symlinks to/from regular files with a '.rclonelink' extension.
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS --vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
```
As most cloud storage systems do not support symlinks directly, rclone As most cloud storage systems do not support symlinks directly, rclone
stores the symlink as a normal file with a special extension. So a stores the symlink as a normal file with a special extension. So a
@@ -851,8 +797,7 @@ Note that `--links` enables symlink translation globally in rclone -
this includes any backend which supports the concept (for example the this includes any backend which supports the concept (for example the
local backend). `--vfs-links` just enables it for the VFS layer. local backend). `--vfs-links` just enables it for the VFS layer.
This scheme is compatible with that used by the This scheme is compatible with that used by the [local backend with the --local-links flag](/local/#symlinks-junction-points).
[local backend with the --local-links flag](/local/#symlinks-junction-points).
The `--vfs-links` flag has been designed for `rclone mount`, `rclone The `--vfs-links` flag has been designed for `rclone mount`, `rclone
nfsmount` and `rclone serve nfs`. nfsmount` and `rclone serve nfs`.
@@ -862,7 +807,7 @@ It hasn't been tested with the other `rclone serve` commands yet.
A limitation of the current implementation is that it expects the A limitation of the current implementation is that it expects the
caller to resolve sub-symlinks. For example given this directory tree caller to resolve sub-symlinks. For example given this directory tree
```text ```
. .
├── dir ├── dir
│   └── file.txt │   └── file.txt
@@ -940,9 +885,7 @@ sync`.
This flag allows you to manually set the statistics about the filing system. This flag allows you to manually set the statistics about the filing system.
It can be useful when those statistics cannot be read correctly automatically. It can be useful when those statistics cannot be read correctly automatically.
```text
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1) --vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
```
## Alternate report of used bytes ## Alternate report of used bytes
@@ -953,7 +896,7 @@ With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size` information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself. and compute the total used space itself.
**WARNING**: Contrary to `rclone size`, this flag ignores filters so that the _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching. calls resulting in extra charges. Use it as a last resort and only with caching.
@@ -971,7 +914,7 @@ Note that some backends won't create metadata unless you pass in the
For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata` For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
we get we get
```console ```
$ ls -l /mnt/ $ ls -l /mnt/
total 1048577 total 1048577
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G -rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
@@ -996,6 +939,8 @@ If the file has no metadata it will be returned as `{}` and if there
is an error reading the metadata the error will be returned as is an error reading the metadata the error will be returned as
`{"error":"error string"}`. `{"error":"error string"}`.
``` ```
rclone mount remote:path /path/to/mountpoint [flags] rclone mount remote:path /path/to/mountpoint [flags]
``` ```
@@ -1066,7 +1011,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -1094,10 +1039,5 @@ Flags for filtering directory listings
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -40,7 +40,7 @@ the backend supports it. If metadata syncing is required then use the
`--metadata` flag. `--metadata` flag.
Note that the modification time and metadata for the root directory Note that the modification time and metadata for the root directory
will **not** be synced. See <https://github.com/rclone/rclone/issues/7652> will **not** be synced. See https://github.com/rclone/rclone/issues/7652
for more info. for more info.
**Important**: Since this can cause data loss, test first with the **Important**: Since this can cause data loss, test first with the
@@ -48,13 +48,12 @@ for more info.
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics. **Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics.
## Logger Flags # Logger Flags
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` flags write paths,
flags write paths, one per line, to the file name (or stdout if it is `-`) one per line, to the file name (or stdout if it is `-`) supplied. What they write is described
supplied. What they write is described in the help below. For example in the help below. For example `--differ` will write all paths which are present
`--differ` will write all paths which are present on both the source and on both the source and destination but different.
destination but different.
The `--combined` flag will write a file (or stdout) which contains all The `--combined` flag will write a file (or stdout) which contains all
file paths with a symbol and then a space and then the path to tell file paths with a symbol and then a space and then the path to tell
@@ -87,7 +86,9 @@ are not currently supported:
Note also that each file is logged during execution, as opposed to after, so it Note also that each file is logged during execution, as opposed to after, so it
is most useful as a predictor of what SHOULD happen to each file is most useful as a predictor of what SHOULD happen to each file
(which may or may not match what actually DID). (which may or may not match what actually DID.)
``` ```
rclone move source:path dest:path [flags] rclone move source:path dest:path [flags]
@@ -114,7 +115,7 @@ rclone move source:path dest:path [flags]
--missing-on-dst string Report all files missing from the destination to this file --missing-on-dst string Report all files missing from the destination to this file
--missing-on-src string Report all files missing from the source to this file --missing-on-src string Report all files missing from the source to this file
-s, --separator string Separator for the items in the format (default ";") -s, --separator string Separator for the items in the format (default ";")
-t, --timeformat string Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05) -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
``` ```
Options shared with other commands are described next. Options shared with other commands are described next.
@@ -124,7 +125,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for anything which can copy a file Flags for anything which can copy a file
```text ```
--check-first Do all the checks before starting transfers --check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only) -c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison --compare-dest stringArray Include additional server-side paths during comparison
@@ -165,7 +166,7 @@ Flags for anything which can copy a file
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -175,7 +176,7 @@ Important flags useful for most commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -205,17 +206,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -19,22 +19,18 @@ like the [move](/commands/rclone_move/) command.
So So
```console rclone moveto src dst
rclone moveto src dst
```
where src and dst are rclone paths, either remote:path or where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows. /path/to/local or C:\windows\path\if\on\windows.
This will: This will:
```text if src is file
if src is file move it to dst, overwriting an existing file if it exists
move it to dst, overwriting an existing file if it exists if src is directory
if src is directory move it to dst, overwriting existing files if they exist
move it to dst, overwriting existing files if they exist see move command for full details
see move command for full details
```
This doesn't transfer files that are identical on src and dst, testing This doesn't transfer files that are identical on src and dst, testing
by size and modification time or MD5SUM. src will be deleted on by size and modification time or MD5SUM. src will be deleted on
@@ -45,13 +41,12 @@ successful transfer.
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics. **Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics.
## Logger Flags # Logger Flags
The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` flags write paths,
flags write paths, one per line, to the file name (or stdout if it is `-`) one per line, to the file name (or stdout if it is `-`) supplied. What they write is described
supplied. What they write is described in the help below. For example in the help below. For example `--differ` will write all paths which are present
`--differ` will write all paths which are present on both the source and on both the source and destination but different.
destination but different.
The `--combined` flag will write a file (or stdout) which contains all The `--combined` flag will write a file (or stdout) which contains all
file paths with a symbol and then a space and then the path to tell file paths with a symbol and then a space and then the path to tell
@@ -84,7 +79,9 @@ are not currently supported:
Note also that each file is logged during execution, as opposed to after, so it Note also that each file is logged during execution, as opposed to after, so it
is most useful as a predictor of what SHOULD happen to each file is most useful as a predictor of what SHOULD happen to each file
(which may or may not match what actually DID). (which may or may not match what actually DID.)
``` ```
rclone moveto source:path dest:path [flags] rclone moveto source:path dest:path [flags]
@@ -109,7 +106,7 @@ rclone moveto source:path dest:path [flags]
--missing-on-dst string Report all files missing from the destination to this file --missing-on-dst string Report all files missing from the destination to this file
--missing-on-src string Report all files missing from the source to this file --missing-on-src string Report all files missing from the source to this file
-s, --separator string Separator for the items in the format (default ";") -s, --separator string Separator for the items in the format (default ";")
-t, --timeformat string Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05) -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
``` ```
Options shared with other commands are described next. Options shared with other commands are described next.
@@ -119,7 +116,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for anything which can copy a file Flags for anything which can copy a file
```text ```
--check-first Do all the checks before starting transfers --check-first Do all the checks before starting transfers
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only) -c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
--compare-dest stringArray Include additional server-side paths during comparison --compare-dest stringArray Include additional server-side paths during comparison
@@ -160,7 +157,7 @@ Flags for anything which can copy a file
Important flags useful for most commands Important flags useful for most commands
```text ```
-n, --dry-run Do a trial run with no permanent changes -n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode -i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more) -v, --verbose count Print lots more stuff (repeat for more)
@@ -170,7 +167,7 @@ Important flags useful for most commands
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -200,17 +197,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -24,45 +24,41 @@ structure as it goes along.
You can interact with the user interface using key presses, You can interact with the user interface using key presses,
press '?' to toggle the help on and off. The supported keys are: press '?' to toggle the help on and off. The supported keys are:
```text ↑,↓ or k,j to Move
↑,↓ or k,j to Move →,l to enter
→,l to enter ←,h to return
←,h to return g toggle graph
g toggle graph c toggle counts
c toggle counts a toggle average size in directory
a toggle average size in directory m toggle modified time
m toggle modified time u toggle human-readable format
u toggle human-readable format n,s,C,A,M sort by name,size,count,asize,mtime
n,s,C,A,M sort by name,size,count,asize,mtime d delete file/directory
d delete file/directory v select file/directory
v select file/directory V enter visual select mode
V enter visual select mode D delete selected files/directories
D delete selected files/directories y copy current path to clipboard
y copy current path to clipboard Y display current path
Y display current path ^L refresh screen (fix screen corruption)
^L refresh screen (fix screen corruption) r recalculate file sizes
r recalculate file sizes ? to toggle help on and off
? to toggle help on and off ESC to close the menu box
ESC to close the menu box q/^c to quit
q/^c to quit
```
Listed files/directories may be prefixed by a one-character flag, Listed files/directories may be prefixed by a one-character flag,
some of them combined with a description in brackets at end of line. some of them combined with a description in brackets at end of line.
These flags have the following meaning: These flags have the following meaning:
```text e means this is an empty directory, i.e. contains no files (but
e means this is an empty directory, i.e. contains no files (but may contain empty subdirectories)
may contain empty subdirectories) ~ means this is a directory where some of the files (possibly in
~ means this is a directory where some of the files (possibly in subdirectories) have unknown size, and therefore the directory
subdirectories) have unknown size, and therefore the directory size may be underestimated (and average size inaccurate, as it
size may be underestimated (and average size inaccurate, as it is average of the files with known sizes).
is average of the files with known sizes). . means an error occurred while reading a subdirectory, and
. means an error occurred while reading a subdirectory, and therefore the directory size may be underestimated (and average
therefore the directory size may be underestimated (and average size inaccurate)
size inaccurate) ! means an error occurred while reading this directory
! means an error occurred while reading this directory
```
This an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for This an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
rclone remotes. It is missing lots of features at the moment rclone remotes. It is missing lots of features at the moment
@@ -75,6 +71,7 @@ For a non-interactive listing of the remote, see the
[tree](/commands/rclone_tree/) command. To just get the total size of [tree](/commands/rclone_tree/) command. To just get the total size of
the remote you can also use the [size](/commands/rclone_size/) command. the remote you can also use the [size](/commands/rclone_size/) command.
``` ```
rclone ncdu remote:path [flags] rclone ncdu remote:path [flags]
``` ```
@@ -92,7 +89,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -122,17 +119,12 @@ Flags for filtering directory listings
Flags for listing directories Flags for listing directories
```text ```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions --fast-list Use recursive list if available; uses more memory but fewer transactions
``` ```
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -14,7 +14,7 @@ Mount the remote as file system on a mountpoint.
Rclone nfsmount allows Linux, FreeBSD, macOS and Windows to Rclone nfsmount allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with FUSE. mount any of Rclone's cloud storage systems as a file system with FUSE.
First set up your remote using `rclone config`. Check it works with `rclone ls` etc. First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
On Linux and macOS, you can run mount in either foreground or background (aka On Linux and macOS, you can run mount in either foreground or background (aka
daemon) mode. Mount runs in foreground mode by default. Use the `--daemon` flag daemon) mode. Mount runs in foreground mode by default. Use the `--daemon` flag
@@ -29,9 +29,7 @@ mount, waits until success or timeout and exits with appropriate code
On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount` On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount`
is an **empty** **existing** directory: is an **empty** **existing** directory:
```console rclone nfsmount remote:path/to/files /path/to/local/mount
rclone nfsmount remote:path/to/files /path/to/local/mount
```
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows) On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. If foreground mount is used interactively from a console window, for details. If foreground mount is used interactively from a console window,
@@ -41,30 +39,26 @@ used to work with the mount until rclone is interrupted e.g. by pressing Ctrl-C.
The following examples will mount to an automatically assigned drive, The following examples will mount to an automatically assigned drive,
to specific drive letter `X:`, to path `C:\path\parent\mount` to specific drive letter `X:`, to path `C:\path\parent\mount`
(where parent directory or drive must exist, and mount must **not** exist, (where parent directory or drive must exist, and mount must **not** exist,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
and the last example will mount as network share `\\cloud\remote` and map it to an the last example will mount as network share `\\cloud\remote` and map it to an
automatically assigned drive: automatically assigned drive:
```console rclone nfsmount remote:path/to/files *
rclone nfsmount remote:path/to/files * rclone nfsmount remote:path/to/files X:
rclone nfsmount remote:path/to/files X: rclone nfsmount remote:path/to/files C:\path\parent\mount
rclone nfsmount remote:path/to/files C:\path\parent\mount rclone nfsmount remote:path/to/files \\cloud\remote
rclone nfsmount remote:path/to/files \\cloud\remote
```
When the program ends while in foreground mode, either via Ctrl+C or receiving When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount should be automatically stopped. a SIGINT or SIGTERM signal, the mount should be automatically stopped.
When running in background mode the user will have to stop the mount manually: When running in background mode the user will have to stop the mount manually:
```console # Linux
# Linux fusermount -u /path/to/local/mount
fusermount -u /path/to/local/mount #... or on some systems
#... or on some systems fusermount3 -u /path/to/local/mount
fusermount3 -u /path/to/local/mount # OS X or Linux when using nfsmount
# OS X or Linux when using nfsmount umount /path/to/local/mount
umount /path/to/local/mount
```
The umount operation can fail, for example when the mountpoint is busy. The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually. When that happens, it is the user's responsibility to stop the mount manually.
@@ -78,7 +72,7 @@ at all, then 1 PiB is set as both the total and the free size.
## Installing on Windows ## Installing on Windows
To run `rclone nfsmount on Windows`, you will need to To run rclone nfsmount on Windows, you will need to
download and install [WinFsp](http://www.secfs.net/winfsp/). download and install [WinFsp](http://www.secfs.net/winfsp/).
[WinFsp](https://github.com/winfsp/winfsp) is an open-source [WinFsp](https://github.com/winfsp/winfsp) is an open-source
@@ -99,22 +93,20 @@ thumbnails for image and video files on network drives.
In most cases, rclone will mount the remote as a normal, fixed disk drive by default. In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
However, you can also choose to mount it as a remote network drive, often described However, you can also choose to mount it as a remote network drive, often described
as a network share. If you mount an rclone remote using the default, fixed drive as a network share. If you mount an rclone remote using the default, fixed drive mode
mode and experience unexpected program errors, freezes or other issues, consider and experience unexpected program errors, freezes or other issues, consider mounting
mounting as a network drive instead. as a network drive instead.
When mounting as a fixed disk drive you can either mount to an unused drive letter, When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **nonexistent** subdirectory of an **existing** parent or to a path representing a **nonexistent** subdirectory of an **existing** parent
directory or drive. Using the special value `*` will tell rclone to directory or drive. Using the special value `*` will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving automatically assign the next available drive letter, starting with Z: and moving backward.
backward. Examples: Examples:
```console rclone nfsmount remote:path/to/files *
rclone nfsmount remote:path/to/files * rclone nfsmount remote:path/to/files X:
rclone nfsmount remote:path/to/files X: rclone nfsmount remote:path/to/files C:\path\parent\mount
rclone nfsmount remote:path/to/files C:\path\parent\mount rclone nfsmount remote:path/to/files X:
rclone nfsmount remote:path/to/files X:
```
Option `--volname` can be used to set a custom volume name for the mounted Option `--volname` can be used to set a custom volume name for the mounted
file system. The default is to use the remote name and path. file system. The default is to use the remote name and path.
@@ -124,28 +116,24 @@ to your nfsmount command. Mounting to a directory path is not supported in
this mode, it is a limitation Windows imposes on junctions, so the remote must always this mode, it is a limitation Windows imposes on junctions, so the remote must always
be mounted to a drive letter. be mounted to a drive letter.
```console rclone nfsmount remote:path/to/files X: --network-mode
rclone nfsmount remote:path/to/files X: --network-mode
```
A volume name specified with `--volname` will be used to create the network share A volume name specified with `--volname` will be used to create the network share path.
path. A complete UNC path, such as `\\cloud\remote`, optionally with path A complete UNC path, such as `\\cloud\remote`, optionally with path
`\\cloud\remote\madeup\path`, will be used as is. Any other `\\cloud\remote\madeup\path`, will be used as is. Any other
string will be used as the share part, after a default prefix `\\server\`. string will be used as the share part, after a default prefix `\\server\`.
If no volume name is specified then `\\server\share` will be used. If no volume name is specified then `\\server\share` will be used.
You must make sure the volume name is unique when you are mounting more than one You must make sure the volume name is unique when you are mounting more than one drive,
drive, or else the mount command will fail. The share name will be treated as the or else the mount command will fail. The share name will be treated as the volume label for
volume label for the mapped drive, shown in Windows Explorer etc, while the complete the mapped drive, shown in Windows Explorer etc, while the complete
`\\server\share` will be reported as the remote UNC path by `\\server\share` will be reported as the remote UNC path by
`net use` etc, just like a normal network drive mapping. `net use` etc, just like a normal network drive mapping.
If you specify a full network share UNC path with `--volname`, this will implicitly If you specify a full network share UNC path with `--volname`, this will implicitly
set the `--network-mode` option, so the following two examples have same result: set the `--network-mode` option, so the following two examples have same result:
```console rclone nfsmount remote:path/to/files X: --network-mode
rclone nfsmount remote:path/to/files X: --network-mode rclone nfsmount remote:path/to/files X: --volname \\server\share
rclone nfsmount remote:path/to/files X: --volname \\server\share
```
You may also specify the network share UNC path as the mountpoint itself. Then rclone You may also specify the network share UNC path as the mountpoint itself. Then rclone
will automatically assign a drive letter, same as with `*` and use that as will automatically assign a drive letter, same as with `*` and use that as
@@ -153,16 +141,15 @@ mountpoint, and instead use the UNC path specified as the volume name, as if it
specified with the `--volname` option. This will also implicitly set specified with the `--volname` option. This will also implicitly set
the `--network-mode` option. This means the following two examples have same result: the `--network-mode` option. This means the following two examples have same result:
```console rclone nfsmount remote:path/to/files \\cloud\remote
rclone nfsmount remote:path/to/files \\cloud\remote rclone nfsmount remote:path/to/files * --volname \\cloud\remote
rclone nfsmount remote:path/to/files * --volname \\cloud\remote
```
There is yet another way to enable network mode, and to set the share path, There is yet another way to enable network mode, and to set the share path,
and that is to pass the "native" libfuse/WinFsp option directly: and that is to pass the "native" libfuse/WinFsp option directly:
`--fuse-flag --VolumePrefix=\server\share`. Note that the path `--fuse-flag --VolumePrefix=\server\share`. Note that the path
must be with just a single backslash prefix in this case. must be with just a single backslash prefix in this case.
*Note:* In previous versions of rclone this was the only supported method. *Note:* In previous versions of rclone this was the only supported method.
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping) [Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
@@ -175,11 +162,11 @@ The FUSE emulation layer on Windows must convert between the POSIX-based
permission model used in FUSE, and the permission model used in Windows, permission model used in FUSE, and the permission model used in Windows,
based on access-control lists (ACL). based on access-control lists (ACL).
The mounted filesystem will normally get three entries in its access-control list The mounted filesystem will normally get three entries in its access-control list (ACL),
(ACL), representing permissions for the POSIX permission scopes: Owner, group and representing permissions for the POSIX permission scopes: Owner, group and others.
others. By default, the owner and group will be taken from the current user, and By default, the owner and group will be taken from the current user, and the built-in
the built-in group "Everyone" will be used to represent others. The user/group can group "Everyone" will be used to represent others. The user/group can be customized
be customized with FUSE options "UserName" and "GroupName", with FUSE options "UserName" and "GroupName",
e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`. e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`.
The permissions on each entry will be set according to [options](#options) The permissions on each entry will be set according to [options](#options)
`--dir-perms` and `--file-perms`, which takes a value in traditional Unix `--dir-perms` and `--file-perms`, which takes a value in traditional Unix
@@ -279,74 +266,58 @@ does not suffer from the same limitations.
## Mounting on macOS ## Mounting on macOS
Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/), Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/), [macFUSE](https://osxfuse.github.io/)
[macFUSE](https://osxfuse.github.io/) (also known as osxfuse) or (also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
[FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional FUSE driver utilizing (also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
a macOS kernel extension (kext). FUSE-T is an alternative FUSE system which which "mounts" via an NFSv4 local server.
"mounts" via an NFSv4 local server.
### Unicode Normalization #### Unicode Normalization
It is highly recommended to keep the default of `--no-unicode-normalization=false` It is highly recommended to keep the default of `--no-unicode-normalization=false`
for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity). for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity).
### NFS mount ### NFS mount
This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/) This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/) command and mounts
command and mounts it to the specified mountpoint. If you run this in background it to the specified mountpoint. If you run this in background mode using |--daemon|, you will need to
mode using |--daemon|, you will need to send SIGTERM signal to the rclone process send SIGTERM signal to the rclone process using |kill| command to stop the mount.
using |kill| command to stop the mount.
Note that `--nfs-cache-handle-limit` controls the maximum number of cached file Note that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the `nfsmount` caching handler.
handles stored by the `nfsmount` caching handler. This should not be set too low This should not be set too low or you may experience errors when trying to access files. The default is 1000000,
or you may experience errors when trying to access files. The default is 1000000,
but consider lowering this limit if the server's system resource usage causes problems. but consider lowering this limit if the server's system resource usage causes problems.
### macFUSE Notes ### macFUSE Notes
If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
from the website, rclone will locate the macFUSE libraries without any further intervention. the website, rclone will locate the macFUSE libraries without any further intervention.
If however, macFUSE is installed using the [macports](https://www.macports.org/) If however, macFUSE is installed using the [macports](https://www.macports.org/) package manager,
package manager, the following addition steps are required. the following addition steps are required.
```console sudo mkdir /usr/local/lib
sudo mkdir /usr/local/lib cd /usr/local/lib
cd /usr/local/lib sudo ln -s /opt/local/lib/libfuse.2.dylib
sudo ln -s /opt/local/lib/libfuse.2.dylib
```
### FUSE-T Limitations, Caveats, and Notes ### FUSE-T Limitations, Caveats, and Notes
There are some limitations, caveats, and notes about how it works. These are There are some limitations, caveats, and notes about how it works. These are current as
current as of FUSE-T version 1.0.14. of FUSE-T version 1.0.14.
#### ModTime update on read #### ModTime update on read
As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats): As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):
> File access and modification times cannot be set separately as it seems to be an > File access and modification times cannot be set separately as it seems to be an
> issue with the NFS client which always modifies both. Can be reproduced with > issue with the NFS client which always modifies both. Can be reproduced with
> 'touch -m' and 'touch -a' commands > 'touch -m' and 'touch -a' commands
This means that viewing files with various tools, notably macOS Finder, will cause This means that viewing files with various tools, notably macOS Finder, will cause rclone
rclone to update the modification time of the file. This may make rclone upload a to update the modification time of the file. This may make rclone upload a full new copy
full new copy of the file. of the file.
#### Read Only mounts #### Read Only mounts
When mounting with `--read-only`, attempts to write to files will fail *silently* When mounting with `--read-only`, attempts to write to files will fail *silently* as
as opposed to with a clear warning as in macFUSE. opposed to with a clear warning as in macFUSE.
# Mounting on Linux
On newer versions of Ubuntu, you may encounter the following error when running
`rclone mount`:
> NOTICE: mount helper error: fusermount3: mount failed: Permission denied
> CRITICAL: Fatal error: failed to mount FUSE fs: fusermount: exit status 1
This may be due to newer [Apparmor](https://wiki.ubuntu.com/AppArmor) restrictions,
which can be disabled with `sudo aa-disable /usr/bin/fusermount3` (you may need to
`sudo apt install apparmor-utils` beforehand).
## Limitations ## Limitations
@@ -447,14 +418,12 @@ helper you should symlink rclone binary to `/sbin/mount.rclone` and optionally
rclone will detect it and translate command-line arguments appropriately. rclone will detect it and translate command-line arguments appropriately.
Now you can run classic mounts like this: Now you can run classic mounts like this:
```
```console
mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
``` ```
or create systemd mount units: or create systemd mount units:
```
```ini
# /etc/systemd/system/mnt-data.mount # /etc/systemd/system/mnt-data.mount
[Unit] [Unit]
Description=Mount for /mnt/data Description=Mount for /mnt/data
@@ -466,8 +435,7 @@ Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone
``` ```
optionally accompanied by systemd automount unit optionally accompanied by systemd automount unit
```
```ini
# /etc/systemd/system/mnt-data.automount # /etc/systemd/system/mnt-data.automount
[Unit] [Unit]
Description=AutoMount for /mnt/data Description=AutoMount for /mnt/data
@@ -479,8 +447,7 @@ WantedBy=multi-user.target
``` ```
or add in `/etc/fstab` a line like or add in `/etc/fstab` a line like
```
```console
sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0 sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
``` ```
@@ -529,10 +496,8 @@ directory should be considered up to date and not refreshed from the
backend. Changes made through the VFS will appear immediately or backend. Changes made through the VFS will appear immediately or
invalidate the cache. invalidate the cache.
```text
--dir-cache-time duration Time to cache directory entries for (default 5m0s) --dir-cache-time duration Time to cache directory entries for (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s) --poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
```
However, changes made directly on the cloud storage by the web However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once interface or a different copy of rclone will only be picked up once
@@ -544,22 +509,16 @@ You can send a `SIGHUP` signal to rclone for it to flush all
directory caches, regardless of how old they are. Assuming only one directory caches, regardless of how old they are. Assuming only one
rclone instance is running, you can reset the cache like this: rclone instance is running, you can reset the cache like this:
```console kill -SIGHUP $(pidof rclone)
kill -SIGHUP $(pidof rclone)
```
If you configure rclone with a [remote control](/rc) then you can use If you configure rclone with a [remote control](/rc) then you can use
rclone rc to flush the whole directory cache: rclone rc to flush the whole directory cache:
```console rclone rc vfs/forget
rclone rc vfs/forget
```
Or individual files or directories: Or individual files or directories:
```console rclone rc vfs/forget file=path/to/file dir=path/to/dir
rclone rc vfs/forget file=path/to/file dir=path/to/dir
```
## VFS File Buffering ## VFS File Buffering
@@ -590,7 +549,6 @@ write simultaneously to a file. See below for more details.
Note that the VFS cache is separate from the cache backend and you may Note that the VFS cache is separate from the cache backend and you may
find that you need one or the other or both. find that you need one or the other or both.
```text
--cache-dir string Directory rclone will use for caching. --cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off) --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
@@ -598,7 +556,6 @@ find that you need one or the other or both.
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off) --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s) --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache (default 5s) --vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
```
If run with `-vv` rclone will print the location of the file cache. The If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but files are stored in the user cache file area which is OS dependent but
@@ -646,13 +603,13 @@ directly to the remote without caching anything on disk.
This will mean some operations are not possible This will mean some operations are not possible
- Files can't be opened for both read AND write * Files can't be opened for both read AND write
- Files opened for write can't be seeked * Files opened for write can't be seeked
- Existing files opened for write must have O_TRUNC set * Existing files opened for write must have O_TRUNC set
- Files open for read with O_TRUNC will be opened write only * Files open for read with O_TRUNC will be opened write only
- Files open for write only will behave as if O_TRUNC was supplied * Files open for write only will behave as if O_TRUNC was supplied
- Open modes O_APPEND, O_TRUNC are ignored * Open modes O_APPEND, O_TRUNC are ignored
- If an upload fails it can't be retried * If an upload fails it can't be retried
### --vfs-cache-mode minimal ### --vfs-cache-mode minimal
@@ -662,10 +619,10 @@ write will be a lot more compatible, but uses the minimal disk space.
These operations are not possible These operations are not possible
- Files opened for write only can't be seeked * Files opened for write only can't be seeked
- Existing files opened for write must have O_TRUNC set * Existing files opened for write must have O_TRUNC set
- Files opened for write only will ignore O_APPEND, O_TRUNC * Files opened for write only will ignore O_APPEND, O_TRUNC
- If an upload fails it can't be retried * If an upload fails it can't be retried
### --vfs-cache-mode writes ### --vfs-cache-mode writes
@@ -748,11 +705,9 @@ read, at the cost of an increased number of requests.
These flags control the chunking: These flags control the chunking:
```text
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
--vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-chunk-streams int The number of parallel streams to read at once
```
The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter. The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
@@ -766,9 +721,9 @@ value is "off", which is the default, the limit is disabled and the chunk size
will grow indefinitely. will grow indefinitely.
With `--vfs-read-chunk-size 100M` and `--vfs-read-chunk-size-limit 0` With `--vfs-read-chunk-size 100M` and `--vfs-read-chunk-size-limit 0`
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
and so on. When `--vfs-read-chunk-size-limit 500M` is specified, the result would When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
be 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on. 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading. Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
@@ -806,41 +761,32 @@ In particular S3 and Swift benefit hugely from the `--no-modtime` flag
(or use `--use-server-modtime` for a slightly different effect) as each (or use `--use-server-modtime` for a slightly different effect) as each
read of the modification time takes a transaction. read of the modification time takes a transaction.
```text
--no-checksum Don't compare checksums on up/download. --no-checksum Don't compare checksums on up/download.
--no-modtime Don't read/write the modification time (can speed things up). --no-modtime Don't read/write the modification time (can speed things up).
--no-seek Don't allow seeking in files. --no-seek Don't allow seeking in files.
--read-only Only allow read-only access. --read-only Only allow read-only access.
```
Sometimes rclone is delivered reads or writes out of order. Rather Sometimes rclone is delivered reads or writes out of order. Rather
than seeking rclone will wait a short time for the in sequence read or than seeking rclone will wait a short time for the in sequence read or
write to come in. These flags only come into effect when not using an write to come in. These flags only come into effect when not using an
on disk cache file. on disk cache file.
```text
--vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
--vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s) --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
```
When using VFS write caching (`--vfs-cache-mode` with value writes or full), When using VFS write caching (`--vfs-cache-mode` with value writes or full),
the global flag `--transfers` can be set to adjust the number of parallel uploads the global flag `--transfers` can be set to adjust the number of parallel uploads of
of modified files from the cache (the related global flag `--checkers` has no modified files from the cache (the related global flag `--checkers` has no effect on the VFS).
effect on the VFS).
```text
--transfers int Number of file transfers to run in parallel (default 4) --transfers int Number of file transfers to run in parallel (default 4)
```
## Symlinks ## Symlinks
By default the VFS does not support symlinks. However this may be By default the VFS does not support symlinks. However this may be
enabled with either of the following flags: enabled with either of the following flags:
```text
--links Translate symlinks to/from regular files with a '.rclonelink' extension. --links Translate symlinks to/from regular files with a '.rclonelink' extension.
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS --vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
```
As most cloud storage systems do not support symlinks directly, rclone As most cloud storage systems do not support symlinks directly, rclone
stores the symlink as a normal file with a special extension. So a stores the symlink as a normal file with a special extension. So a
@@ -852,8 +798,7 @@ Note that `--links` enables symlink translation globally in rclone -
this includes any backend which supports the concept (for example the this includes any backend which supports the concept (for example the
local backend). `--vfs-links` just enables it for the VFS layer. local backend). `--vfs-links` just enables it for the VFS layer.
This scheme is compatible with that used by the This scheme is compatible with that used by the [local backend with the --local-links flag](/local/#symlinks-junction-points).
[local backend with the --local-links flag](/local/#symlinks-junction-points).
The `--vfs-links` flag has been designed for `rclone mount`, `rclone The `--vfs-links` flag has been designed for `rclone mount`, `rclone
nfsmount` and `rclone serve nfs`. nfsmount` and `rclone serve nfs`.
@@ -863,7 +808,7 @@ It hasn't been tested with the other `rclone serve` commands yet.
A limitation of the current implementation is that it expects the A limitation of the current implementation is that it expects the
caller to resolve sub-symlinks. For example given this directory tree caller to resolve sub-symlinks. For example given this directory tree
```text ```
. .
├── dir ├── dir
│   └── file.txt │   └── file.txt
@@ -941,9 +886,7 @@ sync`.
This flag allows you to manually set the statistics about the filing system. This flag allows you to manually set the statistics about the filing system.
It can be useful when those statistics cannot be read correctly automatically. It can be useful when those statistics cannot be read correctly automatically.
```text
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1) --vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
```
## Alternate report of used bytes ## Alternate report of used bytes
@@ -954,7 +897,7 @@ With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size` information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself. and compute the total used space itself.
**WARNING**: Contrary to `rclone size`, this flag ignores filters so that the _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching. calls resulting in extra charges. Use it as a last resort and only with caching.
@@ -972,7 +915,7 @@ Note that some backends won't create metadata unless you pass in the
For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata` For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
we get we get
```console ```
$ ls -l /mnt/ $ ls -l /mnt/
total 1048577 total 1048577
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G -rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
@@ -997,6 +940,8 @@ If the file has no metadata it will be returned as `{}` and if there
is an error reading the metadata the error will be returned as is an error reading the metadata the error will be returned as
`{"error":"error string"}`. `{"error":"error string"}`.
``` ```
rclone nfsmount remote:path /path/to/mountpoint [flags] rclone nfsmount remote:path /path/to/mountpoint [flags]
``` ```
@@ -1072,7 +1017,7 @@ See the [global flags page](/flags/) for global options not listed here.
Flags for filtering directory listings Flags for filtering directory listings
```text ```
--delete-excluded Delete files on dest excluded from sync --delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern --exclude stringArray Exclude files matching pattern
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin) --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
@@ -1100,10 +1045,5 @@ Flags for filtering directory listings
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

View File

@@ -13,8 +13,9 @@ Obscure password for use in the rclone config file.
In the rclone config file, human-readable passwords are In the rclone config file, human-readable passwords are
obscured. Obscuring them is done by encrypting them and writing them obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these out in base64. This is **not** a secure way of encrypting these
passwords as rclone can decrypt them - it is to prevent "eyedropping" - passwords as rclone can decrypt them - it is to prevent "eyedropping"
namely someone seeing a password in the rclone config file by accident. - namely someone seeing a password in the rclone config file by
accident.
Many equally important things (like access tokens) are not obscured in Many equally important things (like access tokens) are not obscured in
the config file. However it is very hard to shoulder surf a 64 the config file. However it is very hard to shoulder surf a 64
@@ -24,9 +25,7 @@ This command can also accept a password through STDIN instead of an
argument by passing a hyphen as an argument. This will use the first argument by passing a hyphen as an argument. This will use the first
line of STDIN as the password not including the trailing newline. line of STDIN as the password not including the trailing newline.
```console echo "secretpassword" | rclone obscure -
echo "secretpassword" | rclone obscure -
```
If there is no data on STDIN to read, rclone obscure will default to If there is no data on STDIN to read, rclone obscure will default to
obfuscating the hyphen itself. obfuscating the hyphen itself.
@@ -49,10 +48,5 @@ See the [global flags page](/flags/) for global options not listed here.
## See Also ## See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
<!-- markdownlint-restore -->

Some files were not shown because too many files have changed in this diff Show More