Mirror of https://github.com/rclone/rclone.git (synced 2026-01-29 07:43:45 +00:00)

Compare commits: fix-8980-d...tiers (103 commits)
| SHA1 |
|---|
| 136f76b245 |
| 36f6a3fa1a |
| 944dcc3e35 |
| 65c60c1d30 |
| d9c1a96071 |
| 824257583c |
| cd857754c7 |
| 017d930dfc |
| 6ed68883fe |
| 2a5e0b28c9 |
| 264e75d892 |
| 4553c3de7b |
| 2e9e4a47a2 |
| e44829a448 |
| 6b9f77459a |
| 4b2c39c585 |
| dbf2499d85 |
| 760a134c95 |
| 63cfe260a2 |
| 3550275cd3 |
| b728929f44 |
| 9ec918f137 |
| 3a9c7ceeb1 |
| 5502c0f8ae |
| d707ae7cf4 |
| 9bef7f0dbf |
| 933bbf3ac8 |
| ecc5972d6f |
| 07805796ab |
| 189e6dbf6a |
| d47e289165 |
| e51a0599a0 |
| 530a901de3 |
| a64a8aad0e |
| 6529d2cd8f |
| d9895fef9d |
| 8c7b7ac891 |
| f814498561 |
| 5f4e4b1a20 |
| 28c187b9b4 |
| e07afc4645 |
| 08932ab92a |
| 356ee57edb |
| 7c1660214d |
| 51b197c86f |
| 029ffd2761 |
| f81cd7d279 |
| 1a0a4628d7 |
| c10a4d465c |
| 3a6e07a613 |
| c36f99d343 |
| 3e21a7261b |
| fd439fab62 |
| 976aa6b416 |
| b3a0383ca3 |
| c13f129339 |
| 748d8c8957 |
| 4d379efcbb |
| e5e6a4b5ae |
| df18e8c55b |
| f4e17d8b0b |
| e5c69511bc |
| 175d4bc553 |
| 4851f1796c |
| 4ff8899b2c |
| 8f29a0b0a1 |
| 8b0e76e53b |
| 233fef5c4d |
| b9586c3e03 |
| 0dc0ab1330 |
| a6bbdb35a0 |
| b33cb77b6c |
| d51322bb5f |
| e718ab6091 |
| 0a9e6e130f |
| 3358b9049c |
| 847734d421 |
| f7b255d4ec |
| 24c752ed9e |
| a99d155fd4 |
| f72b32b470 |
| 9be7f99bf8 |
| 6858bf242e |
| e8c6867e4c |
| 50fbd6b049 |
| 0783cab952 |
| 886ac7af1d |
| 3c40238f02 |
| 46ca0dd7fe |
| 2e968e7ce0 |
| 1886c552db |
| 38ab3dd5b1 |
| 1d02e1219a |
| 035d3f344c |
| 7d45aee70f |
| f30789180d |
| 7cb05a84e9 |
| 6d4c625bfb |
| 4eccc40168 |
| e451f9c999 |
| 321488441e |
| bd99e05ff0 |
| 6440052fbd |
8	.github/workflows/build.yml (vendored)
@@ -95,7 +95,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0

@@ -216,7 +216,7 @@ jobs:
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0

@@ -229,7 +229,7 @@ jobs:
cache: false
- name: Cache
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: |
~/go/pkg/mod

@@ -307,7 +307,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0

@@ -52,7 +52,7 @@ jobs:
df -h .
- name: Checkout Repository
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0

@@ -129,7 +129,7 @@ jobs:
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
uses: actions/cache@v5
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |

@@ -183,7 +183,7 @@ jobs:
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*

@@ -198,7 +198,7 @@ jobs:
steps:
- name: Download Image Digests
uses: actions/download-artifact@v6
uses: actions/download-artifact@v7
with:
path: /tmp/digests
pattern: digests-*

@@ -30,7 +30,7 @@ jobs:
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v5
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Build and publish docker plugin

@@ -17,6 +17,14 @@ linters:
#- prealloc # TODO
- revive
- unconvert
exclusions:
rules:
- linters:
- revive
text: 'var-naming: avoid meaningless package names'
- linters:
- revive
text: 'var-naming: avoid package names that conflict with Go standard library package names'
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
govet:

@@ -136,6 +144,7 @@ linters:
- name: var-naming
disabled: false

formatters:
enable:
- goimports

@@ -632,14 +632,22 @@ Add your backend to the docs - you'll need to pick an icon for it from
alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.

First add a data file about your backend in
`docs/data/backends/remote.yaml` - this is used to build the overview
tables and the tiering info.

- Create it with: `bin/manage_backends.py create docs/data/backends/remote.yaml`
- Edit it to fill in the blanks. Look at the [tiers docs](https://rclone.org/tiers/).
- Run this command to fill in the features: `bin/manage_backends.py features docs/data/backends/remote.yaml`

Next edit these files:

- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are
  automatically added to this file with `make backenddocs`)
  - make sure this has the `autogenerated options` comments in (see your
    reference backend docs)
  - update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
  table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
15185	MANUAL.html (generated) - file diff suppressed because it is too large
7896	MANUAL.txt (generated) - file diff suppressed because it is too large
@@ -28,21 +28,25 @@ directories to and from different cloud storage providers.
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Bizfly Cloud Simple Storage [:page_facing_up:](https://rclone.org/s3/#bizflycloud)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Cloudinary [:page_facing_up:](https://rclone.org/cloudinary/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Drime [:page_facing_up:](https://rclone.org/s3/#drime)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Filen [:page_facing_up:](https://rclone.org/filen/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)

@@ -109,6 +113,7 @@ directories to and from different cloud storage providers.
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- Shade [:page_facing_up:](https://rclone.org/shade/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)

@@ -21,6 +21,7 @@ This file describes how to make the various kinds of releases
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make check
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
@@ -16,11 +16,13 @@ import (
	_ "github.com/rclone/rclone/backend/compress"
	_ "github.com/rclone/rclone/backend/crypt"
	_ "github.com/rclone/rclone/backend/doi"
	_ "github.com/rclone/rclone/backend/drime"
	_ "github.com/rclone/rclone/backend/drive"
	_ "github.com/rclone/rclone/backend/dropbox"
	_ "github.com/rclone/rclone/backend/fichier"
	_ "github.com/rclone/rclone/backend/filefabric"
	_ "github.com/rclone/rclone/backend/filelu"
	_ "github.com/rclone/rclone/backend/filen"
	_ "github.com/rclone/rclone/backend/filescom"
	_ "github.com/rclone/rclone/backend/ftp"
	_ "github.com/rclone/rclone/backend/gofile"

@@ -55,6 +57,7 @@ import (
	_ "github.com/rclone/rclone/backend/s3"
	_ "github.com/rclone/rclone/backend/seafile"
	_ "github.com/rclone/rclone/backend/sftp"
	_ "github.com/rclone/rclone/backend/shade"
	_ "github.com/rclone/rclone/backend/sharefile"
	_ "github.com/rclone/rclone/backend/sia"
	_ "github.com/rclone/rclone/backend/smb"

@@ -63,7 +66,6 @@ import (
	_ "github.com/rclone/rclone/backend/swift"
	_ "github.com/rclone/rclone/backend/ulozto"
	_ "github.com/rclone/rclone/backend/union"
	_ "github.com/rclone/rclone/backend/uptobox"
	_ "github.com/rclone/rclone/backend/webdav"
	_ "github.com/rclone/rclone/backend/yandex"
	_ "github.com/rclone/rclone/backend/zoho"
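Each backend listed above registers itself with rclone in an `init` function, which is why a blank import is all it takes to wire a new backend such as `drime` into the binary. A minimal sketch of that registration pattern follows; the backend name `mybackend`, its option, and the error are illustrative, not real rclone code.

```go
// Illustrative sketch only - "mybackend" is not a real rclone backend.
package mybackend

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init registers the backend so that a blank import of this package
// (as in backend/all above) makes it selectable by name.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend",
		Description: "Example backend registered via init()",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "endpoint",
			Help: "Endpoint to connect to.",
		}},
	})
}

// NewFs is where a real backend would build its fs.Fs from the parsed config.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("mybackend: not implemented, illustration only")
}
```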
@@ -86,12 +86,56 @@ var (
	metadataMu sync.Mutex
)

// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
	"cache-control": {
		Help:    "Cache-Control header",
		Type:    "string",
		Example: "no-cache",
	},
	"content-disposition": {
		Help:    "Content-Disposition header",
		Type:    "string",
		Example: "inline",
	},
	"content-encoding": {
		Help:    "Content-Encoding header",
		Type:    "string",
		Example: "gzip",
	},
	"content-language": {
		Help:    "Content-Language header",
		Type:    "string",
		Example: "en-US",
	},
	"content-type": {
		Help:    "Content-Type header",
		Type:    "string",
		Example: "text/plain",
	},
	"tier": {
		Help:     "Tier of the object",
		Type:     "string",
		Example:  "Hot",
		ReadOnly: true,
	},
	"mtime": {
		Help:    "Time of last modification, read from rclone metadata",
		Type:    "RFC 3339",
		Example: "2006-01-02T15:04:05.999999999Z07:00",
	},
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "azureblob",
		Description: "Microsoft Azure Blob Storage",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			System: systemMetadataInfo,
			Help:   `User metadata is stored as x-ms-meta- keys. Azure metadata keys are case insensitive and are always returned in lower case.`,
		},
		Options: []fs.Option{{
			Name: "account",
			Help: `Azure Storage Account Name.

@@ -810,6 +854,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		ReadMetadata:      true,
		WriteMetadata:     true,
		UserMetadata:      true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SetTier:           true,
@@ -1157,6 +1204,289 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
	o.meta[modTimeKey] = modTime.Format(timeFormatOut)
}

// parseXMsTags parses the value of the x-ms-tags header into a map.
// It expects comma-separated key=value pairs. Whitespace around keys and
// values is trimmed. Empty pairs and empty keys are rejected.
func parseXMsTags(s string) (map[string]string, error) {
	if strings.TrimSpace(s) == "" {
		return map[string]string{}, nil
	}
	out := make(map[string]string)
	parts := strings.Split(s, ",")
	for _, p := range parts {
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		kv := strings.SplitN(p, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("invalid tag %q", p)
		}
		k := strings.TrimSpace(kv[0])
		v := strings.TrimSpace(kv[1])
		if k == "" {
			return nil, fmt.Errorf("invalid tag key in %q", p)
		}
		out[k] = v
	}
	return out, nil
}
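A quick illustrative check of the tag format the helper above accepts; this is a sketch, not a test from the repository.

```go
package azureblob

import "fmt"

// demoParseXMsTags shows the accepted x-ms-tags format: comma-separated
// key=value pairs with surrounding whitespace ignored, and a clear error
// for a pair without "=".
func demoParseXMsTags() {
	tags, err := parseXMsTags(" env = dev , team=sync ")
	fmt.Println(err, tags["env"], tags["team"]) // <nil> dev sync

	_, err = parseXMsTags("badpair-without-equals")
	fmt.Println(err) // invalid tag "badpair-without-equals"
}
```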
// mapMetadataToAzure maps a generic metadata map to Azure HTTP headers,
// user metadata, tags and optional modTime override.
// Reserved x-ms-* keys (except x-ms-tags) are ignored for user metadata.
//
// Pass a logger to surface non-fatal parsing issues (e.g. bad mtime).
func mapMetadataToAzure(meta map[string]string, logf func(string, ...any)) (headers blob.HTTPHeaders, userMeta map[string]*string, tags map[string]string, modTime *time.Time, err error) {
	if meta == nil {
		return headers, nil, nil, nil, nil
	}
	tmp := make(map[string]string)
	for k, v := range meta {
		lowerKey := strings.ToLower(k)
		switch lowerKey {
		case "cache-control":
			headers.BlobCacheControl = pString(v)
		case "content-disposition":
			headers.BlobContentDisposition = pString(v)
		case "content-encoding":
			headers.BlobContentEncoding = pString(v)
		case "content-language":
			headers.BlobContentLanguage = pString(v)
		case "content-type":
			headers.BlobContentType = pString(v)
		case "x-ms-tags":
			parsed, perr := parseXMsTags(v)
			if perr != nil {
				return headers, nil, nil, nil, perr
			}
			// allocate only if there are tags
			if len(parsed) > 0 {
				tags = parsed
			}
		case "mtime":
			// Accept multiple layouts for tolerance
			var parsed time.Time
			var pErr error
			for _, layout := range []string{time.RFC3339Nano, time.RFC3339, timeFormatOut} {
				parsed, pErr = time.Parse(layout, v)
				if pErr == nil {
					modTime = &parsed
					break
				}
			}
			// Log and ignore if unparseable
			if modTime == nil && logf != nil {
				logf("metadata: couldn't parse mtime %q: %v", v, pErr)
			}
		case "tier":
			// ignore - handled elsewhere
		default:
			// Filter out other reserved headers so they don't end up as user metadata
			if strings.HasPrefix(lowerKey, "x-ms-") {
				continue
			}
			tmp[lowerKey] = v
		}
	}
	userMeta = toAzureMetaPtr(tmp)
	return headers, userMeta, tags, modTime, nil
}
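To make the mapping concrete, here is an illustrative sketch (not part of the change) that feeds one metadata map through mapMetadataToAzure and notes where each key ends up; the values and the "potato" key are invented.

```go
package azureblob

import (
	"fmt"
	"time"
)

// demoMapMetadata is illustrative only. It feeds a typical metadata map
// through mapMetadataToAzure and shows where each key ends up.
func demoMapMetadata() {
	meta := map[string]string{
		"content-type":  "text/plain",                     // -> headers.BlobContentType
		"cache-control": "no-cache",                       // -> headers.BlobCacheControl
		"x-ms-tags":     "env=dev,team=sync",              // -> blob tags
		"mtime":         "2009-05-06T04:05:06.499999999Z", // -> modTime override
		"x-ms-meta-foo": "dropped",                        // reserved x-ms-* prefix is filtered out
		"potato":        "royal",                          // -> user metadata
	}
	headers, userMeta, tags, modTime, err := mapMetadataToAzure(meta, func(format string, args ...any) {
		fmt.Printf(format+"\n", args...)
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(*headers.BlobContentType, *headers.BlobCacheControl) // text/plain no-cache
	fmt.Println(*userMeta["potato"])                                 // royal
	fmt.Println(tags["env"], tags["team"])                           // dev sync
	fmt.Println(modTime.Format(time.RFC3339Nano))                    // 2009-05-06T04:05:06.499999999Z
}
```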
// toAzureMetaPtr converts a map[string]string to map[string]*string as used by Azure SDK
func toAzureMetaPtr(in map[string]string) map[string]*string {
	if len(in) == 0 {
		return nil
	}
	out := make(map[string]*string, len(in))
	for k, v := range in {
		vv := v
		out[k] = &vv
	}
	return out
}
// assembleCopyParams prepares headers, metadata and tags for copy operations.
|
||||
//
|
||||
// It starts from the source properties, optionally overlays mapped metadata
|
||||
// from rclone's metadata options, ensures mtime presence when mapping is
|
||||
// enabled, and returns whether mapping was actually requested (hadMapping).
|
||||
// assembleCopyParams prepares headers, metadata and tags for copy operations.
|
||||
//
|
||||
// If includeBaseMeta is true, start user metadata from the source's metadata
|
||||
// and overlay mapped values. This matches multipart copy commit behavior.
|
||||
// If false, only include mapped user metadata (no source baseline) which
|
||||
// matches previous singlepart StartCopyFromURL semantics.
|
||||
func assembleCopyParams(ctx context.Context, f *Fs, src fs.Object, srcProps *blob.GetPropertiesResponse, includeBaseMeta bool) (headers blob.HTTPHeaders, meta map[string]*string, tags map[string]string, hadMapping bool, err error) {
|
||||
// Start from source properties
|
||||
headers = blob.HTTPHeaders{
|
||||
BlobCacheControl: srcProps.CacheControl,
|
||||
BlobContentDisposition: srcProps.ContentDisposition,
|
||||
BlobContentEncoding: srcProps.ContentEncoding,
|
||||
BlobContentLanguage: srcProps.ContentLanguage,
|
||||
BlobContentMD5: srcProps.ContentMD5,
|
||||
BlobContentType: srcProps.ContentType,
|
||||
}
|
||||
// Optionally deep copy user metadata pointers from source. Normalise keys to
|
||||
// lower-case to avoid duplicate x-ms-meta headers when we later inject/overlay
|
||||
// metadata (Azure treats keys case-insensitively but Go's http.Header will
|
||||
// join duplicate keys into a comma separated list, which breaks shared-key
|
||||
// signing).
|
||||
if includeBaseMeta && len(srcProps.Metadata) > 0 {
|
||||
meta = make(map[string]*string, len(srcProps.Metadata))
|
||||
for k, v := range srcProps.Metadata {
|
||||
if v != nil {
|
||||
vv := *v
|
||||
meta[strings.ToLower(k)] = &vv
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Only consider mapping if metadata pipeline is enabled
|
||||
if fs.GetConfig(ctx).Metadata {
|
||||
mapped, mapErr := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
|
||||
if mapErr != nil {
|
||||
return headers, meta, nil, false, fmt.Errorf("failed to map metadata: %w", mapErr)
|
||||
}
|
||||
if mapped != nil {
|
||||
// Map rclone metadata to Azure shapes
|
||||
mappedHeaders, userMeta, mappedTags, mappedModTime, herr := mapMetadataToAzure(mapped, func(format string, args ...any) { fs.Debugf(f, format, args...) })
|
||||
if herr != nil {
|
||||
return headers, meta, nil, false, fmt.Errorf("metadata mapping: %w", herr)
|
||||
}
|
||||
hadMapping = true
|
||||
// Overlay headers (only non-nil)
|
||||
if mappedHeaders.BlobCacheControl != nil {
|
||||
headers.BlobCacheControl = mappedHeaders.BlobCacheControl
|
||||
}
|
||||
if mappedHeaders.BlobContentDisposition != nil {
|
||||
headers.BlobContentDisposition = mappedHeaders.BlobContentDisposition
|
||||
}
|
||||
if mappedHeaders.BlobContentEncoding != nil {
|
||||
headers.BlobContentEncoding = mappedHeaders.BlobContentEncoding
|
||||
}
|
||||
if mappedHeaders.BlobContentLanguage != nil {
|
||||
headers.BlobContentLanguage = mappedHeaders.BlobContentLanguage
|
||||
}
|
||||
if mappedHeaders.BlobContentType != nil {
|
||||
headers.BlobContentType = mappedHeaders.BlobContentType
|
||||
}
|
||||
// Overlay user metadata
|
||||
if len(userMeta) > 0 {
|
||||
if meta == nil {
|
||||
meta = make(map[string]*string, len(userMeta))
|
||||
}
|
||||
for k, v := range userMeta {
|
||||
meta[k] = v
|
||||
}
|
||||
}
|
||||
// Apply tags if any
|
||||
if len(mappedTags) > 0 {
|
||||
tags = mappedTags
|
||||
}
|
||||
// Ensure mtime present using mapped or source time
|
||||
if _, ok := meta[modTimeKey]; !ok {
|
||||
when := src.ModTime(ctx)
|
||||
if mappedModTime != nil {
|
||||
when = *mappedModTime
|
||||
}
|
||||
val := when.Format(time.RFC3339Nano)
|
||||
if meta == nil {
|
||||
meta = make(map[string]*string, 1)
|
||||
}
|
||||
meta[modTimeKey] = &val
|
||||
}
|
||||
// Ensure content-type fallback to source if not set by mapper
|
||||
if headers.BlobContentType == nil {
|
||||
headers.BlobContentType = srcProps.ContentType
|
||||
}
|
||||
} else {
|
||||
// Mapping enabled but not provided: ensure mtime present based on source ModTime
|
||||
if _, ok := meta[modTimeKey]; !ok {
|
||||
when := src.ModTime(ctx)
|
||||
val := when.Format(time.RFC3339Nano)
|
||||
if meta == nil {
|
||||
meta = make(map[string]*string, 1)
|
||||
}
|
||||
meta[modTimeKey] = &val
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return headers, meta, tags, hadMapping, nil
|
||||
}
|
||||
|
||||
// applyMappedMetadata applies mapped metadata and headers to the object state for uploads.
|
||||
//
|
||||
// It reads `--metadata`, `--metadata-set`, and `--metadata-mapper` outputs via fs.GetMetadataOptions
|
||||
// and updates o.meta, o.tags and ui.httpHeaders accordingly.
|
||||
func (o *Object) applyMappedMetadata(ctx context.Context, src fs.ObjectInfo, ui *uploadInfo, options []fs.OpenOption) (modTime time.Time, err error) {
|
||||
// Start from the source modtime; may be overridden by metadata
|
||||
modTime = src.ModTime(ctx)
|
||||
|
||||
// Fetch mapped metadata if --metadata is enabled
|
||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err != nil {
|
||||
return modTime, err
|
||||
}
|
||||
if meta == nil {
|
||||
// No metadata processing requested
|
||||
return modTime, nil
|
||||
}
|
||||
|
||||
// Map metadata using common helper
|
||||
headers, userMeta, tags, mappedModTime, err := mapMetadataToAzure(meta, func(format string, args ...any) { fs.Debugf(o, format, args...) })
|
||||
if err != nil {
|
||||
return modTime, err
|
||||
}
|
||||
// Merge headers into ui
|
||||
if headers.BlobCacheControl != nil {
|
||||
ui.httpHeaders.BlobCacheControl = headers.BlobCacheControl
|
||||
}
|
||||
if headers.BlobContentDisposition != nil {
|
||||
ui.httpHeaders.BlobContentDisposition = headers.BlobContentDisposition
|
||||
}
|
||||
if headers.BlobContentEncoding != nil {
|
||||
ui.httpHeaders.BlobContentEncoding = headers.BlobContentEncoding
|
||||
}
|
||||
if headers.BlobContentLanguage != nil {
|
||||
ui.httpHeaders.BlobContentLanguage = headers.BlobContentLanguage
|
||||
}
|
||||
if headers.BlobContentType != nil {
|
||||
ui.httpHeaders.BlobContentType = headers.BlobContentType
|
||||
}
|
||||
|
||||
// Apply user metadata to o.meta with a single critical section
|
||||
if len(userMeta) > 0 {
|
||||
metadataMu.Lock()
|
||||
if o.meta == nil {
|
||||
o.meta = make(map[string]string, len(userMeta))
|
||||
}
|
||||
for k, v := range userMeta {
|
||||
if v != nil {
|
||||
o.meta[k] = *v
|
||||
}
|
||||
}
|
||||
metadataMu.Unlock()
|
||||
}
|
||||
|
||||
// Apply tags
|
||||
if len(tags) > 0 {
|
||||
if o.tags == nil {
|
||||
o.tags = make(map[string]string, len(tags))
|
||||
}
|
||||
for k, v := range tags {
|
||||
o.tags[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if mappedModTime != nil {
|
||||
modTime = *mappedModTime
|
||||
}
|
||||
|
||||
return modTime, nil
|
||||
}
|
||||
|
||||
// Returns whether file is a directory marker or not
|
||||
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
|
||||
// Directory markers are 0 length
|
||||
@@ -1951,18 +2281,19 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert metadata from source object
|
||||
// Prepare metadata/headers/tags for destination
|
||||
// For multipart commit, include base metadata from source then overlay mapped
|
||||
commitHeaders, commitMeta, commitTags, _, err := assembleCopyParams(ctx, f, src, srcProperties, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("multipart copy: %w", err)
|
||||
}
|
||||
|
||||
// Convert metadata from source or mapper
|
||||
options := blockblob.CommitBlockListOptions{
|
||||
Metadata: srcProperties.Metadata,
|
||||
Tier: parseTier(f.opt.AccessTier),
|
||||
HTTPHeaders: &blob.HTTPHeaders{
|
||||
BlobCacheControl: srcProperties.CacheControl,
|
||||
BlobContentDisposition: srcProperties.ContentDisposition,
|
||||
BlobContentEncoding: srcProperties.ContentEncoding,
|
||||
BlobContentLanguage: srcProperties.ContentLanguage,
|
||||
BlobContentMD5: srcProperties.ContentMD5,
|
||||
BlobContentType: srcProperties.ContentType,
|
||||
},
|
||||
Metadata: commitMeta,
|
||||
Tags: commitTags,
|
||||
Tier: parseTier(f.opt.AccessTier),
|
||||
HTTPHeaders: &commitHeaders,
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
@@ -1993,10 +2324,36 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
|
||||
return nil, fmt.Errorf("single part copy: source auth: %w", err)
|
||||
}
|
||||
|
||||
// Start the copy
|
||||
// Prepare mapped metadata/tags/headers if requested
|
||||
options := blob.StartCopyFromURLOptions{
|
||||
Tier: parseTier(f.opt.AccessTier),
|
||||
}
|
||||
var postHeaders *blob.HTTPHeaders
|
||||
// Read source properties and assemble params; this also handles the case when mapping is disabled
|
||||
srcProps, err := src.readMetaDataAlways(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("single part copy: read source properties: %w", err)
|
||||
}
|
||||
// For singlepart copy, do not include base metadata from source in StartCopyFromURL
|
||||
headers, meta, tags, hadMapping, aerr := assembleCopyParams(ctx, f, src, srcProps, false)
|
||||
if aerr != nil {
|
||||
return nil, fmt.Errorf("single part copy: %w", aerr)
|
||||
}
|
||||
// Apply tags and post-copy headers only when mapping requested changes
|
||||
if len(tags) > 0 {
|
||||
options.BlobTags = make(map[string]string, len(tags))
|
||||
for k, v := range tags {
|
||||
options.BlobTags[k] = v
|
||||
}
|
||||
}
|
||||
if hadMapping {
|
||||
// Only set metadata explicitly when mapping was requested; otherwise
|
||||
// let the service copy source metadata (including mtime) automatically.
|
||||
if len(meta) > 0 {
|
||||
options.Metadata = meta
|
||||
}
|
||||
postHeaders = &headers
|
||||
}
|
||||
var startCopy blob.StartCopyFromURLResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options)
|
||||
@@ -2026,6 +2383,16 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
|
||||
pollTime = min(2*pollTime, time.Second)
|
||||
}
|
||||
|
||||
// If mapper requested header changes, set them post-copy
|
||||
if postHeaders != nil {
|
||||
blb := f.getBlobSVC(dstContainer, dstPath)
|
||||
_, setErr := blb.SetHTTPHeaders(ctx, *postHeaders, nil)
|
||||
if setErr != nil {
|
||||
return nil, fmt.Errorf("single part copy: failed to set headers: %w", setErr)
|
||||
}
|
||||
}
|
||||
// Metadata (when requested) is set via StartCopyFromURL options.Metadata
|
||||
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
@@ -2157,6 +2524,35 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
	return metadata
}

// Metadata returns metadata for an object
//
// It returns a combined view of system and user metadata.
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
	// Ensure metadata is loaded
	if err := o.readMetaData(ctx); err != nil {
		return nil, err
	}

	m := fs.Metadata{}

	// System metadata we expose
	if !o.modTime.IsZero() {
		m["mtime"] = o.modTime.Format(time.RFC3339Nano)
	}
	if o.accessTier != "" {
		m["tier"] = string(o.accessTier)
	}

	// Merge user metadata (already lower-cased keys)
	metadataMu.Lock()
	for k, v := range o.meta {
		m[k] = v
	}
	metadataMu.Unlock()

	return m, nil
}
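A sketch of how this combined view could be read through rclone's generic fs API; the remote string "azblob:container" and the object path are placeholders, and the snippet assumes an already configured azureblob remote.

```go
package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/rclone/rclone/backend/azureblob"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configfile"
)

func main() {
	ctx := context.Background()
	configfile.Install() // load the default rclone.conf

	// "azblob:container" and the object name are placeholders.
	f, err := fs.NewFs(ctx, "azblob:container")
	if err != nil {
		log.Fatal(err)
	}
	o, err := f.NewObject(ctx, "path/to/blob.txt")
	if err != nil {
		log.Fatal(err)
	}

	// fs.GetMetadata calls the object's Metadata method when implemented.
	meta, err := fs.GetMetadata(ctx, o)
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range meta {
		fmt.Printf("%s=%s\n", k, v) // e.g. mtime=..., tier=Hot, plus user keys
	}
}
```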
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
|
||||
//
|
||||
// Sets
|
||||
@@ -2995,17 +3391,19 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
// containerPath = containerPath[:len(containerPath)-1]
|
||||
// }
|
||||
|
||||
// Update Mod time
|
||||
o.updateMetadataWithModTime(src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return ui, err
|
||||
}
|
||||
|
||||
// Create the HTTP headers for the upload
|
||||
// Start with default content-type based on source
|
||||
ui.httpHeaders = blob.HTTPHeaders{
|
||||
BlobContentType: pString(fs.MimeType(ctx, src)),
|
||||
}
|
||||
|
||||
// Apply mapped metadata/headers/tags if requested
|
||||
modTime, err := o.applyMappedMetadata(ctx, src, &ui, options)
|
||||
if err != nil {
|
||||
return ui, err
|
||||
}
|
||||
// Ensure mtime is set in metadata based on possibly overridden modTime
|
||||
o.updateMetadataWithModTime(modTime)
|
||||
|
||||
// Compute the Content-MD5 of the file. As we stream all uploads it
|
||||
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
|
||||
if !o.fs.opt.DisableCheckSum {
|
||||
|
||||
@@ -5,11 +5,16 @@ package azureblob
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
@@ -148,4 +153,417 @@ func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("Features", f.testFeatures)
|
||||
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
|
||||
t.Run("Metadata", f.testMetadataPaths)
|
||||
}
|
||||
|
||||
// helper to read blob properties for an object
|
||||
func getProps(ctx context.Context, t *testing.T, o fs.Object) *blob.GetPropertiesResponse {
|
||||
ao := o.(*Object)
|
||||
props, err := ao.readMetaDataAlways(ctx)
|
||||
require.NoError(t, err)
|
||||
return props
|
||||
}
|
||||
|
||||
// helper to assert select headers and user metadata
|
||||
func assertHeadersAndMetadata(t *testing.T, props *blob.GetPropertiesResponse, want map[string]string, wantUserMeta map[string]string) {
|
||||
// Headers
|
||||
get := func(p *string) string {
|
||||
if p == nil {
|
||||
return ""
|
||||
}
|
||||
return *p
|
||||
}
|
||||
if v, ok := want["content-type"]; ok {
|
||||
assert.Equal(t, v, get(props.ContentType), "content-type")
|
||||
}
|
||||
if v, ok := want["cache-control"]; ok {
|
||||
assert.Equal(t, v, get(props.CacheControl), "cache-control")
|
||||
}
|
||||
if v, ok := want["content-disposition"]; ok {
|
||||
assert.Equal(t, v, get(props.ContentDisposition), "content-disposition")
|
||||
}
|
||||
if v, ok := want["content-encoding"]; ok {
|
||||
assert.Equal(t, v, get(props.ContentEncoding), "content-encoding")
|
||||
}
|
||||
if v, ok := want["content-language"]; ok {
|
||||
assert.Equal(t, v, get(props.ContentLanguage), "content-language")
|
||||
}
|
||||
// User metadata (case-insensitive keys from service)
|
||||
norm := make(map[string]*string, len(props.Metadata))
|
||||
for kk, vv := range props.Metadata {
|
||||
norm[strings.ToLower(kk)] = vv
|
||||
}
|
||||
for k, v := range wantUserMeta {
|
||||
pv, ok := norm[strings.ToLower(k)]
|
||||
if assert.True(t, ok, fmt.Sprintf("missing user metadata key %q", k)) {
|
||||
if pv == nil {
|
||||
assert.Equal(t, v, "", k)
|
||||
} else {
|
||||
assert.Equal(t, v, *pv, k)
|
||||
}
|
||||
} else {
|
||||
// Log available keys for diagnostics
|
||||
keys := make([]string, 0, len(props.Metadata))
|
||||
for kk := range props.Metadata {
|
||||
keys = append(keys, kk)
|
||||
}
|
||||
t.Logf("available user metadata keys: %v", keys)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// helper to read blob tags for an object
|
||||
func getTagsMap(ctx context.Context, t *testing.T, o fs.Object) map[string]string {
|
||||
ao := o.(*Object)
|
||||
blb := ao.getBlobSVC()
|
||||
resp, err := blb.GetTags(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
out := make(map[string]string)
|
||||
for _, tag := range resp.BlobTagSet {
|
||||
if tag.Key != nil {
|
||||
k := *tag.Key
|
||||
v := ""
|
||||
if tag.Value != nil {
|
||||
v = *tag.Value
|
||||
}
|
||||
out[k] = v
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Test metadata across different write paths
|
||||
func (f *Fs) testMetadataPaths(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
if testing.Short() {
|
||||
t.Skip("skipping in short mode")
|
||||
}
|
||||
|
||||
// Common expected metadata and headers
|
||||
baseMeta := fs.Metadata{
|
||||
"cache-control": "no-cache",
|
||||
"content-disposition": "inline",
|
||||
"content-language": "en-US",
|
||||
// Note: Don't set content-encoding here to avoid download decoding differences
|
||||
// We will set a custom user metadata key
|
||||
"potato": "royal",
|
||||
// and modtime
|
||||
"mtime": fstest.Time("2009-05-06T04:05:06.499999999Z").Format(time.RFC3339Nano),
|
||||
}
|
||||
|
||||
// Singlepart upload
|
||||
t.Run("PutSinglepart", func(t *testing.T) {
|
||||
// size less than chunk size
|
||||
contents := random.String(int(f.opt.ChunkSize / 2))
|
||||
item := fstest.NewItem("meta-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
// override content-type via metadata mapping
|
||||
meta := fs.Metadata{}
|
||||
meta.Merge(baseMeta)
|
||||
meta["content-type"] = "text/plain"
|
||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
|
||||
defer func() { _ = obj.Remove(ctx) }()
|
||||
|
||||
props := getProps(ctx, t, obj)
|
||||
assertHeadersAndMetadata(t, props, map[string]string{
|
||||
"content-type": "text/plain",
|
||||
"cache-control": "no-cache",
|
||||
"content-disposition": "inline",
|
||||
"content-language": "en-US",
|
||||
}, map[string]string{
|
||||
"potato": "royal",
|
||||
})
|
||||
_ = http.StatusOK // keep import for parity but don't inspect RawResponse
|
||||
})
|
||||
|
||||
// Multipart upload
|
||||
t.Run("PutMultipart", func(t *testing.T) {
|
||||
// size greater than chunk size to force multipart
|
||||
contents := random.String(int(f.opt.ChunkSize + 1024))
|
||||
item := fstest.NewItem("meta-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
meta := fs.Metadata{}
|
||||
meta.Merge(baseMeta)
|
||||
meta["content-type"] = "application/json"
|
||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
|
||||
defer func() { _ = obj.Remove(ctx) }()
|
||||
|
||||
props := getProps(ctx, t, obj)
|
||||
assertHeadersAndMetadata(t, props, map[string]string{
|
||||
"content-type": "application/json",
|
||||
"cache-control": "no-cache",
|
||||
"content-disposition": "inline",
|
||||
"content-language": "en-US",
|
||||
}, map[string]string{
|
||||
"potato": "royal",
|
||||
})
|
||||
|
||||
// Tags: Singlepart upload
|
||||
t.Run("PutSinglepartTags", func(t *testing.T) {
|
||||
contents := random.String(int(f.opt.ChunkSize / 2))
|
||||
item := fstest.NewItem("tags-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
meta := fs.Metadata{
|
||||
"x-ms-tags": "env=dev,team=sync",
|
||||
}
|
||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/plain", meta)
|
||||
defer func() { _ = obj.Remove(ctx) }()
|
||||
|
||||
tags := getTagsMap(ctx, t, obj)
|
||||
assert.Equal(t, "dev", tags["env"])
|
||||
assert.Equal(t, "sync", tags["team"])
|
||||
})
|
||||
|
||||
// Tags: Multipart upload
|
||||
t.Run("PutMultipartTags", func(t *testing.T) {
|
||||
contents := random.String(int(f.opt.ChunkSize + 2048))
|
||||
item := fstest.NewItem("tags-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
meta := fs.Metadata{
|
||||
"x-ms-tags": "project=alpha,release=2025-08",
|
||||
}
|
||||
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "application/octet-stream", meta)
|
||||
defer func() { _ = obj.Remove(ctx) }()
|
||||
|
||||
tags := getTagsMap(ctx, t, obj)
|
||||
assert.Equal(t, "alpha", tags["project"])
|
||||
assert.Equal(t, "2025-08", tags["release"])
|
||||
})
|
||||
})
|
||||
|
||||
// Singlepart copy with metadata-set mapping; omit content-type to exercise fallback
|
||||
t.Run("CopySinglepart", func(t *testing.T) {
|
||||
// create small source
|
||||
contents := random.String(int(f.opt.ChunkSize / 2))
|
||||
srcItem := fstest.NewItem("meta-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
|
||||
defer func() { _ = srcObj.Remove(ctx) }()
|
||||
|
||||
// set mapping via MetadataSet
|
||||
ctx2, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
ci.MetadataSet = fs.Metadata{
|
||||
"cache-control": "private, max-age=60",
|
||||
"content-disposition": "attachment; filename=foo.txt",
|
||||
"content-language": "fr",
|
||||
// no content-type: should fallback to source
|
||||
"potato": "maris",
|
||||
}
|
||||
|
||||
// do copy
|
||||
dstName := "meta-copy-single-dst.txt"
|
||||
dst, err := f.Copy(ctx2, srcObj, dstName)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = dst.Remove(ctx2) }()
|
||||
|
||||
props := getProps(ctx2, t, dst)
|
||||
// content-type should fallback to source (text/plain)
|
||||
assertHeadersAndMetadata(t, props, map[string]string{
|
||||
"content-type": "text/plain",
|
||||
"cache-control": "private, max-age=60",
|
||||
"content-disposition": "attachment; filename=foo.txt",
|
||||
"content-language": "fr",
|
||||
}, map[string]string{
|
||||
"potato": "maris",
|
||||
})
|
||||
// mtime should be populated on copy when --metadata is used
|
||||
// and should equal the source ModTime (RFC3339Nano)
|
||||
// Read user metadata (case-insensitive)
|
||||
m := props.Metadata
|
||||
var gotMtime string
|
||||
for k, v := range m {
|
||||
if strings.EqualFold(k, "mtime") && v != nil {
|
||||
gotMtime = *v
|
||||
break
|
||||
}
|
||||
}
|
||||
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
|
||||
// parse and compare times ignoring formatting differences
|
||||
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
|
||||
}
|
||||
})
|
||||
|
||||
// CopySinglepart with only --metadata (no MetadataSet) must inject mtime and preserve src content-type
|
||||
t.Run("CopySinglepart_MetadataOnly", func(t *testing.T) {
|
||||
contents := random.String(int(f.opt.ChunkSize / 2))
|
||||
srcItem := fstest.NewItem("meta-copy-single-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
|
||||
defer func() { _ = srcObj.Remove(ctx) }()
|
||||
|
||||
ctx2, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
|
||||
dstName := "meta-copy-single-only-dst.txt"
|
||||
dst, err := f.Copy(ctx2, srcObj, dstName)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = dst.Remove(ctx2) }()
|
||||
|
||||
props := getProps(ctx2, t, dst)
|
||||
assertHeadersAndMetadata(t, props, map[string]string{
|
||||
"content-type": "text/plain",
|
||||
}, map[string]string{})
|
||||
// Assert mtime injected
|
||||
m := props.Metadata
|
||||
var gotMtime string
|
||||
for k, v := range m {
|
||||
if strings.EqualFold(k, "mtime") && v != nil {
|
||||
gotMtime = *v
|
||||
break
|
||||
}
|
||||
}
|
||||
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
|
||||
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
|
||||
}
|
||||
})
|
||||
|
||||
// Multipart copy with metadata-set mapping; omit content-type to exercise fallback
|
||||
t.Run("CopyMultipart", func(t *testing.T) {
|
||||
// create large source to force multipart
|
||||
contents := random.String(int(f.opt.CopyCutoff + 1024))
|
||||
srcItem := fstest.NewItem("meta-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
|
||||
defer func() { _ = srcObj.Remove(ctx) }()
|
||||
|
||||
// set mapping via MetadataSet
|
||||
ctx2, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
ci.MetadataSet = fs.Metadata{
|
||||
"cache-control": "max-age=0, no-cache",
|
||||
// omit content-type to trigger fallback
|
||||
"content-language": "de",
|
||||
"potato": "desiree",
|
||||
}
|
||||
|
||||
dstName := "meta-copy-multi-dst.txt"
|
||||
dst, err := f.Copy(ctx2, srcObj, dstName)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = dst.Remove(ctx2) }()
|
||||
|
||||
props := getProps(ctx2, t, dst)
|
||||
// content-type should fallback to source (application/octet-stream)
|
||||
assertHeadersAndMetadata(t, props, map[string]string{
|
||||
"content-type": "application/octet-stream",
|
||||
"cache-control": "max-age=0, no-cache",
|
||||
"content-language": "de",
|
||||
}, map[string]string{
|
||||
"potato": "desiree",
|
||||
})
|
||||
// mtime should be populated on copy when --metadata is used
|
||||
m := props.Metadata
|
||||
var gotMtime string
|
||||
for k, v := range m {
|
||||
if strings.EqualFold(k, "mtime") && v != nil {
|
||||
gotMtime = *v
|
||||
break
|
||||
}
|
||||
}
|
||||
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
|
||||
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
|
||||
}
|
||||
})
|
||||
|
||||
// CopyMultipart with only --metadata must inject mtime and preserve src content-type
|
||||
t.Run("CopyMultipart_MetadataOnly", func(t *testing.T) {
|
||||
contents := random.String(int(f.opt.CopyCutoff + 2048))
|
||||
srcItem := fstest.NewItem("meta-copy-multi-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
|
||||
defer func() { _ = srcObj.Remove(ctx) }()
|
||||
|
||||
ctx2, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
|
||||
dstName := "meta-copy-multi-only-dst.txt"
|
||||
dst, err := f.Copy(ctx2, srcObj, dstName)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = dst.Remove(ctx2) }()
|
||||
|
||||
props := getProps(ctx2, t, dst)
|
||||
assertHeadersAndMetadata(t, props, map[string]string{
|
||||
"content-type": "application/octet-stream",
|
||||
}, map[string]string{})
|
||||
m := props.Metadata
|
||||
var gotMtime string
|
||||
for k, v := range m {
|
||||
if strings.EqualFold(k, "mtime") && v != nil {
|
||||
gotMtime = *v
|
||||
break
|
||||
}
|
||||
}
|
||||
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
|
||||
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
|
||||
}
|
||||
})
|
||||
|
||||
// Tags: Singlepart copy
|
||||
t.Run("CopySinglepartTags", func(t *testing.T) {
|
||||
// create small source
|
||||
contents := random.String(int(f.opt.ChunkSize / 2))
|
||||
srcItem := fstest.NewItem("tags-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
|
||||
defer func() { _ = srcObj.Remove(ctx) }()
|
||||
|
||||
// set mapping via MetadataSet including tags
|
||||
ctx2, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
ci.MetadataSet = fs.Metadata{
|
||||
"x-ms-tags": "copy=single,mode=test",
|
||||
}
|
||||
|
||||
dstName := "tags-copy-single-dst.txt"
|
||||
dst, err := f.Copy(ctx2, srcObj, dstName)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = dst.Remove(ctx2) }()
|
||||
|
||||
tags := getTagsMap(ctx2, t, dst)
|
||||
assert.Equal(t, "single", tags["copy"])
|
||||
assert.Equal(t, "test", tags["mode"])
|
||||
})
|
||||
|
||||
// Tags: Multipart copy
|
||||
t.Run("CopyMultipartTags", func(t *testing.T) {
|
||||
// create large source to force multipart
|
||||
contents := random.String(int(f.opt.CopyCutoff + 4096))
|
||||
srcItem := fstest.NewItem("tags-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
|
||||
defer func() { _ = srcObj.Remove(ctx) }()
|
||||
|
||||
ctx2, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
ci.MetadataSet = fs.Metadata{
|
||||
"x-ms-tags": "copy=multi,mode=test",
|
||||
}
|
||||
|
||||
dstName := "tags-copy-multi-dst.txt"
|
||||
dst, err := f.Copy(ctx2, srcObj, dstName)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = dst.Remove(ctx2) }()
|
||||
|
||||
tags := getTagsMap(ctx2, t, dst)
|
||||
assert.Equal(t, "multi", tags["copy"])
|
||||
assert.Equal(t, "test", tags["mode"])
|
||||
})
|
||||
|
||||
// Negative: invalid x-ms-tags must error
|
||||
t.Run("InvalidXMsTags", func(t *testing.T) {
|
||||
contents := random.String(32)
|
||||
item := fstest.NewItem("tags-invalid.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
|
||||
// construct ObjectInfo with invalid x-ms-tags
|
||||
buf := strings.NewReader(contents)
|
||||
// Build obj info with metadata
|
||||
meta := fs.Metadata{
|
||||
"x-ms-tags": "badpair-without-equals",
|
||||
}
|
||||
// force metadata on
|
||||
ctx2, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
obji := object.NewStaticObjectInfo(item.Path, item.ModTime, int64(len(contents)), true, nil, nil)
|
||||
obji = obji.WithMetadata(meta).WithMimeType("text/plain")
|
||||
_, err := f.Put(ctx2, buf, obji)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid tag")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -133,23 +133,32 @@ type File struct {
	Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
// StorageAPI is as returned from the b2_authorize_account call
type StorageAPI struct {
	AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
	AccountID               string `json:"accountId"`               // The identifier for the account.
	Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
		BucketID     string   `json:"bucketId"`     // When present, access is restricted to one bucket.
		BucketName   string   `json:"bucketName"`   // When present, name of bucket - may be empty
		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
		Buckets      []struct { // When present, access is restricted to one or more buckets.
			ID   string `json:"id"`   // ID of bucket
			Name string `json:"name"` // When present, name of bucket - may be empty
		} `json:"buckets"`
		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
		NamePrefix   any      `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
	} `json:"allowed"`
	APIURL              string `json:"apiUrl"`              // The base URL to use for all API calls except for uploading and downloading files.
	AuthorizationToken  string `json:"authorizationToken"`  // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
	DownloadURL         string `json:"downloadUrl"`         // The base URL to use for downloading files.
	MinimumPartSize     int    `json:"minimumPartSize"`     // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
	RecommendedPartSize int    `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
	AccountID          string `json:"accountId"`          // The identifier for the account.
	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
	APIs               struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
		Storage StorageAPI `json:"storageApi"`
	} `json:"apiInfo"`
}
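In the v4 response the storage fields are nested under apiInfo.storageApi instead of sitting at the top level. A hedged sketch of decoding a trimmed-down sample response (all values invented) into the new structs:

```go
package api

import (
	"encoding/json"
	"fmt"
)

// demoDecodeV4 decodes a made-up, trimmed-down v4 b2_authorize_account
// response to show where the storage API fields now live (APIs.Storage
// rather than the top level of the response).
func demoDecodeV4() error {
	body := []byte(`{
		"accountId": "000111222333",
		"authorizationToken": "4_xxxx",
		"apiInfo": {
			"storageApi": {
				"apiUrl": "https://api000.backblazeb2.com",
				"downloadUrl": "https://f000.backblazeb2.com",
				"recommendedPartSize": 100000000,
				"absoluteMinimumPartSize": 5000000,
				"allowed": {
					"capabilities": ["listBuckets", "readFiles"],
					"buckets": [{"id": "bucket_id_1", "name": "my-bucket"}]
				}
			}
		}
	}`)
	var resp AuthorizeAccountResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		return err
	}
	fmt.Println(resp.APIs.Storage.APIURL)                  // https://api000.backblazeb2.com
	fmt.Println(resp.APIs.Storage.Allowed.Buckets[0].Name) // my-bucket
	return nil
}
```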
// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
	AccountID string `json:"accountId"` // The identifier for the account.
131	backend/b2/b2.go
@@ -607,17 +607,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to authorize account: %w", err)
|
||||
}
|
||||
// If this is a key limited to a single bucket, it must exist already
|
||||
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
|
||||
allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
|
||||
if allowedBucket == "" {
|
||||
return nil, errors.New("bucket that application key is restricted to no longer exists")
|
||||
// If this is a key limited to one or more buckets, one of them must exist
|
||||
// and be ours.
|
||||
if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 {
|
||||
buckets := f.info.APIs.Storage.Allowed.Buckets
|
||||
var rootFound = false
|
||||
var rootID string
|
||||
for _, b := range buckets {
|
||||
allowedBucket := f.opt.Enc.ToStandardName(b.Name)
|
||||
if allowedBucket == "" {
|
||||
fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
if allowedBucket == f.rootBucket {
|
||||
rootFound = true
|
||||
rootID = b.ID
|
||||
}
|
||||
}
|
||||
if allowedBucket != f.rootBucket {
|
||||
return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
|
||||
if !rootFound {
|
||||
return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets)
|
||||
}
|
||||
f.cache.MarkOK(f.rootBucket)
|
||||
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
|
||||
f.setBucketID(f.rootBucket, rootID)
|
||||
}
|
||||
if f.rootBucket != "" && f.rootDirectory != "" {
|
||||
// Check to see if the (bucket,directory) is actually an existing file
|
||||
@@ -643,7 +655,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
|
||||
defer f.authMu.Unlock()
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/b2api/v1/b2_authorize_account",
|
||||
Path: "/b2api/v4/b2_authorize_account",
|
||||
RootURL: f.opt.Endpoint,
|
||||
UserName: f.opt.Account,
|
||||
Password: f.opt.Key,
|
||||
@@ -656,13 +668,13 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to authenticate: %w", err)
|
||||
}
|
||||
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
|
||||
f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasPermission returns if the current AuthorizationToken has the selected permission
|
||||
func (f *Fs) hasPermission(permission string) bool {
|
||||
return slices.Contains(f.info.Allowed.Capabilities, permission)
|
||||
return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
|
||||
}
|
||||
|
||||
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
|
||||
@@ -1067,44 +1079,83 @@ type listBucketFn func(*api.Bucket) error
|
||||
|
||||
// listBucketsToFn lists the buckets to the function supplied
|
||||
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
|
||||
var account = api.ListBucketsRequest{
|
||||
AccountID: f.info.AccountID,
|
||||
BucketID: f.info.Allowed.BucketID,
|
||||
}
|
||||
if bucketName != "" && account.BucketID == "" {
|
||||
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
|
||||
responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]
|
||||
|
||||
call := func(id string) error {
|
||||
var account = api.ListBucketsRequest{
|
||||
AccountID: f.info.AccountID,
|
||||
BucketID: id,
|
||||
}
|
||||
if bucketName != "" && account.BucketID == "" {
|
||||
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
|
||||
}
|
||||
|
||||
var response api.ListBucketsResponse
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_list_buckets",
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
responses = append(responses, response)
|
||||
return nil
|
||||
}
|
||||
|
||||
var response api.ListBucketsResponse
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_list_buckets",
|
||||
for i := range f.info.APIs.Storage.Allowed.Buckets {
|
||||
b := &f.info.APIs.Storage.Allowed.Buckets[i]
|
||||
// Empty names indicate a bucket that no longer exists, this is non-fatal
|
||||
// for multi-bucket API keys.
|
||||
if b.Name == "" {
|
||||
continue
|
||||
}
|
||||
// When requesting a specific bucket skip over non-matching names
|
||||
if bucketName != "" && b.Name != bucketName {
|
||||
continue
|
||||
}
|
||||
|
||||
err := call(b.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if len(f.info.APIs.Storage.Allowed.Buckets) == 0 {
|
||||
err := call("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
f.bucketIDMutex.Lock()
|
||||
f.bucketTypeMutex.Lock()
|
||||
f._bucketID = make(map[string]string, 1)
|
||||
f._bucketType = make(map[string]string, 1)
|
||||
for i := range response.Buckets {
|
||||
bucket := &response.Buckets[i]
|
||||
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
|
||||
f.cache.MarkOK(bucket.Name)
|
||||
f._bucketID[bucket.Name] = bucket.ID
|
||||
f._bucketType[bucket.Name] = bucket.Type
|
||||
|
||||
for ri := range responses {
|
||||
response := &responses[ri]
|
||||
for i := range response.Buckets {
|
||||
bucket := &response.Buckets[i]
|
||||
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
|
||||
f.cache.MarkOK(bucket.Name)
|
||||
f._bucketID[bucket.Name] = bucket.ID
|
||||
f._bucketType[bucket.Name] = bucket.Type
|
||||
}
|
||||
}
|
||||
f.bucketTypeMutex.Unlock()
|
||||
f.bucketIDMutex.Unlock()
|
||||
for i := range response.Buckets {
|
||||
bucket := &response.Buckets[i]
|
||||
err = fn(bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
for ri := range responses {
|
||||
response := &responses[ri]
|
||||
for i := range response.Buckets {
|
||||
bucket := &response.Buckets[i]
|
||||
err := fn(bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
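The listBucketsToFn change above handles application keys that are restricted to several buckets: it issues one b2_list_buckets call per allowed bucket ID (or a single unrestricted call when the key has no bucket restriction) and gathers the responses before invoking the callback. A minimal sketch of that gather-then-iterate pattern, with bucketsResponse and listOne as stand-ins for the real api.ListBucketsResponse type and the pacer-wrapped API call:

    // bucketsResponse stands in for api.ListBucketsResponse and listOne for a
    // single b2_list_buckets call restricted to one bucket ID.
    type bucketsResponse struct {
        Buckets []string
    }

    func listAllowedBuckets(allowedIDs []string, listOne func(id string) (bucketsResponse, error)) ([]bucketsResponse, error) {
        if len(allowedIDs) == 0 {
            // Key is not restricted to particular buckets: one unrestricted call.
            allowedIDs = []string{""}
        }
        responses := make([]bucketsResponse, 0, len(allowedIDs))
        for _, id := range allowedIDs {
            resp, err := listOne(id)
            if err != nil {
                return nil, err
            }
            responses = append(responses, resp)
        }
        return responses, nil
    }

The real code additionally skips allowed buckets whose name is empty (deleted buckets) and, when a specific bucket name is requested, those whose name does not match.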
@@ -1606,7 +1657,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
bucket, bucketPath := f.split(remote)
|
||||
var RootURL string
|
||||
if f.opt.DownloadURL == "" {
|
||||
RootURL = f.info.DownloadURL
|
||||
RootURL = f.info.APIs.Storage.DownloadURL
|
||||
} else {
|
||||
RootURL = f.opt.DownloadURL
|
||||
}
|
||||
@@ -1957,7 +2008,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
|
||||
// Use downloadUrl from backblaze if downloadUrl is not set
|
||||
// otherwise use the custom downloadUrl
|
||||
if o.fs.opt.DownloadURL == "" {
|
||||
opts.RootURL = o.fs.info.DownloadURL
|
||||
opts.RootURL = o.fs.info.APIs.Storage.DownloadURL
|
||||
} else {
|
||||
opts.RootURL = o.fs.opt.DownloadURL
|
||||
}
|
||||
|
||||
@@ -403,14 +403,14 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
||||
if ciphertext == "" {
|
||||
return "", nil
|
||||
}
|
||||
pos := strings.Index(ciphertext, ".")
|
||||
if pos == -1 {
|
||||
before, after, ok := strings.Cut(ciphertext, ".")
|
||||
if !ok {
|
||||
return "", ErrorNotAnEncryptedFile
|
||||
} // No .
|
||||
num := ciphertext[:pos]
|
||||
num := before
|
||||
if num == "!" {
|
||||
// No rotation; probably original was not valid unicode
|
||||
return ciphertext[pos+1:], nil
|
||||
return after, nil
|
||||
}
|
||||
dir, err := strconv.Atoi(num)
|
||||
if err != nil {
|
||||
@@ -425,7 +425,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
|
||||
var result bytes.Buffer
|
||||
|
||||
inQuote := false
|
||||
for _, runeValue := range ciphertext[pos+1:] {
|
||||
for _, runeValue := range after {
|
||||
switch {
|
||||
case inQuote:
|
||||
_, _ = result.WriteRune(runeValue)
|
||||
|
||||
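The deobfuscateSegment change above swaps strings.Index plus manual slicing for strings.Cut, which returns both halves of the split and a found flag in one call. A small self-contained illustration of the equivalence (the segment value is made up):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        segment := "17.obfuscated-name"

        // Old style: find the separator, then slice around it.
        pos := strings.Index(segment, ".")
        num, rest := segment[:pos], segment[pos+1:]

        // New style: Cut returns both halves and whether the separator was found.
        before, after, ok := strings.Cut(segment, ".")

        fmt.Println(num == before, rest == after, ok) // true true true
    }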
@@ -77,7 +77,7 @@ The DOI provider can be set when rclone does not automatically recognize a suppo
Name: "doi_resolver_api_url",
Help: `The URL of the DOI resolver API to use.

The DOI resolver can be set for testing or for cases when the the canonical DOI resolver API cannot be used.
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.

Defaults to "https://doi.org/api".`,
Required: false,
237
backend/drime/api/types.go
Normal file
@@ -0,0 +1,237 @@
|
||||
// Package api has type definitions for drime
|
||||
//
|
||||
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Types of things in Item
|
||||
const (
|
||||
ItemTypeFolder = "folder"
|
||||
)
|
||||
|
||||
// User information
|
||||
type User struct {
|
||||
Email string `json:"email"`
|
||||
ID json.Number `json:"id"`
|
||||
Avatar string `json:"avatar"`
|
||||
ModelType string `json:"model_type"`
|
||||
OwnsEntry bool `json:"owns_entry"`
|
||||
EntryPermissions []any `json:"entry_permissions"`
|
||||
DisplayName string `json:"display_name"`
|
||||
}
|
||||
|
||||
// Permissions for a file
|
||||
type Permissions struct {
|
||||
FilesUpdate bool `json:"files.update"`
|
||||
FilesCreate bool `json:"files.create"`
|
||||
FilesDownload bool `json:"files.download"`
|
||||
FilesDelete bool `json:"files.delete"`
|
||||
}
|
||||
|
||||
// Item describes a folder or a file as returned by /drive/file-entries
|
||||
type Item struct {
|
||||
ID json.Number `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Description any `json:"description"`
|
||||
FileName string `json:"file_name"`
|
||||
Mime string `json:"mime"`
|
||||
Color any `json:"color"`
|
||||
Backup bool `json:"backup"`
|
||||
Tracked int `json:"tracked"`
|
||||
FileSize int64 `json:"file_size"`
|
||||
UserID json.Number `json:"user_id"`
|
||||
ParentID json.Number `json:"parent_id"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
DeletedAt any `json:"deleted_at"`
|
||||
IsDeleted int `json:"is_deleted"`
|
||||
Path string `json:"path"`
|
||||
DiskPrefix any `json:"disk_prefix"`
|
||||
Type string `json:"type"`
|
||||
Extension any `json:"extension"`
|
||||
FileHash any `json:"file_hash"`
|
||||
Public bool `json:"public"`
|
||||
Thumbnail bool `json:"thumbnail"`
|
||||
MuxStatus any `json:"mux_status"`
|
||||
ThumbnailURL any `json:"thumbnail_url"`
|
||||
WorkspaceID int `json:"workspace_id"`
|
||||
IsEncrypted int `json:"is_encrypted"`
|
||||
Iv any `json:"iv"`
|
||||
VaultID any `json:"vault_id"`
|
||||
OwnerID int `json:"owner_id"`
|
||||
Hash string `json:"hash"`
|
||||
URL string `json:"url"`
|
||||
Users []User `json:"users"`
|
||||
Tags []any `json:"tags"`
|
||||
Permissions Permissions `json:"permissions"`
|
||||
}
|
||||
|
||||
// Listing response
|
||||
type Listing struct {
|
||||
CurrentPage int `json:"current_page"`
|
||||
Data []Item `json:"data"`
|
||||
From int `json:"from"`
|
||||
LastPage int `json:"last_page"`
|
||||
NextPage int `json:"next_page"`
|
||||
PerPage int `json:"per_page"`
|
||||
PrevPage int `json:"prev_page"`
|
||||
To int `json:"to"`
|
||||
Total int `json:"total"`
|
||||
}
|
||||
|
||||
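Listing is a paginated envelope for /drive/file-entries. A hedged sketch of how a caller might walk it, assuming pages are requested by number and that reaching LastPage ends the walk (the real backend may equally drive this from NextPage):

    // listAll pages through the listing; fetchPage stands in for the real call
    // to /drive/file-entries with a page parameter.
    func listAll(fetchPage func(page int) (*Listing, error)) ([]Item, error) {
        var items []Item
        for page := 1; ; page++ {
            listing, err := fetchPage(page)
            if err != nil {
                return nil, err
            }
            items = append(items, listing.Data...)
            if listing.CurrentPage >= listing.LastPage {
                break
            }
        }
        return items, nil
    }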
// UploadResponse for a file
|
||||
type UploadResponse struct {
|
||||
Status string `json:"status"`
|
||||
FileEntry Item `json:"fileEntry"`
|
||||
}
|
||||
|
||||
// CreateFolderRequest for a folder
|
||||
type CreateFolderRequest struct {
|
||||
Name string `json:"name"`
|
||||
ParentID json.Number `json:"parentId,omitempty"`
|
||||
}
|
||||
|
||||
// CreateFolderResponse for a folder
|
||||
type CreateFolderResponse struct {
|
||||
Status string `json:"status"`
|
||||
Folder Item `json:"folder"`
|
||||
}
|
||||
|
||||
// Error is returned from drime when things go wrong
|
||||
type Error struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q", e.Message)
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// DeleteRequest is the input to DELETE /file-entries
|
||||
type DeleteRequest struct {
|
||||
EntryIDs []string `json:"entryIds"`
|
||||
DeleteForever bool `json:"deleteForever"`
|
||||
}
|
||||
|
||||
// DeleteResponse is returned by DELETE /file-entries
|
||||
type DeleteResponse struct {
|
||||
Status string `json:"status"`
|
||||
Message string `json:"message"`
|
||||
Errors map[string]string `json:"errors"`
|
||||
}
|
||||
|
||||
// UpdateItemRequest describes the updates to be done to an item for PUT /file-entries/{id}/
|
||||
type UpdateItemRequest struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
// UpdateItemResponse is returned by PUT /file-entries/{id}/
|
||||
type UpdateItemResponse struct {
|
||||
Status string `json:"status"`
|
||||
FileEntry Item `json:"fileEntry"`
|
||||
}
|
||||
|
||||
// MoveRequest is the input to /file-entries/move
|
||||
type MoveRequest struct {
|
||||
EntryIDs []string `json:"entryIds"`
|
||||
DestinationID string `json:"destinationId"`
|
||||
}
|
||||
|
||||
// MoveResponse is returned by POST /file-entries/move
|
||||
type MoveResponse struct {
|
||||
Status string `json:"status"`
|
||||
Entries []Item `json:"entries"`
|
||||
}
|
||||
|
||||
// CopyRequest is the input to /file-entries/duplicate
|
||||
type CopyRequest struct {
|
||||
EntryIDs []string `json:"entryIds"`
|
||||
DestinationID string `json:"destinationId"`
|
||||
}
|
||||
|
||||
// CopyResponse is returned by POST /file-entries/duplicate
|
||||
type CopyResponse struct {
|
||||
Status string `json:"status"`
|
||||
Entries []Item `json:"entries"`
|
||||
}
|
||||
|
||||
// MultiPartCreateRequest is the input of POST /s3/multipart/create
|
||||
type MultiPartCreateRequest struct {
|
||||
Filename string `json:"filename"`
|
||||
Mime string `json:"mime"`
|
||||
Size int64 `json:"size"`
|
||||
Extension string `json:"extension"`
|
||||
ParentID json.Number `json:"parent_id"`
|
||||
RelativePath string `json:"relativePath"`
|
||||
}
|
||||
|
||||
// MultiPartCreateResponse is returned by POST /s3/multipart/create
|
||||
type MultiPartCreateResponse struct {
|
||||
UploadID string `json:"uploadId"`
|
||||
Key string `json:"key"`
|
||||
}
|
||||
|
||||
// CompletedPart Type for completed parts when making a multipart upload.
|
||||
type CompletedPart struct {
|
||||
ETag string `json:"ETag"`
|
||||
PartNumber int32 `json:"PartNumber"`
|
||||
}
|
||||
|
||||
// MultiPartGetURLsRequest is the input of POST /s3/multipart/batch-sign-part-urls
|
||||
type MultiPartGetURLsRequest struct {
|
||||
UploadID string `json:"uploadId"`
|
||||
Key string `json:"key"`
|
||||
PartNumbers []int `json:"partNumbers"`
|
||||
}
|
||||
|
||||
// MultiPartGetURLsResponse is the result of POST /s3/multipart/batch-sign-part-urls
|
||||
type MultiPartGetURLsResponse struct {
|
||||
URLs []struct {
|
||||
URL string `json:"url"`
|
||||
PartNumber int32 `json:"partNumber"`
|
||||
} `json:"urls"`
|
||||
}
|
||||
|
||||
// MultiPartCompleteRequest is the input to POST /s3/multipart/complete
|
||||
type MultiPartCompleteRequest struct {
|
||||
UploadID string `json:"uploadId"`
|
||||
Key string `json:"key"`
|
||||
Parts []CompletedPart `json:"parts"`
|
||||
}
|
||||
|
||||
// MultiPartCompleteResponse is the result of POST /s3/multipart/complete
|
||||
type MultiPartCompleteResponse struct {
|
||||
Location string `json:"location"`
|
||||
}
|
||||
|
||||
// MultiPartEntriesRequest is the input to POST /s3/entries
|
||||
type MultiPartEntriesRequest struct {
|
||||
ClientMime string `json:"clientMime"`
|
||||
ClientName string `json:"clientName"`
|
||||
Filename string `json:"filename"`
|
||||
Size int64 `json:"size"`
|
||||
ClientExtension string `json:"clientExtension"`
|
||||
ParentID json.Number `json:"parent_id"`
|
||||
RelativePath string `json:"relativePath"`
|
||||
}
|
||||
|
||||
// MultiPartEntriesResponse is the result of POST /s3/entries
|
||||
type MultiPartEntriesResponse struct {
|
||||
FileEntry Item `json:"fileEntry"`
|
||||
}
|
||||
|
||||
// MultiPartAbort is the input of POST /s3/multipart/abort
|
||||
type MultiPartAbort struct {
|
||||
UploadID string `json:"uploadId"`
|
||||
Key string `json:"key"`
|
||||
}
|
||||
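Taken together, the multipart types above describe a five-step upload: create the upload, sign URLs for the parts, PUT each part to its presigned URL, complete the upload, then register the file entry. A condensed sketch of that sequence under the assumption that the steps run in this order; post and putPart are placeholders for the real rest client helpers, and the signed URLs are assumed to come back in the order requested:

    // multipartFlow shows the assumed order of the multipart calls.
    func multipartFlow(post func(path string, req, resp any) error, putPart func(url string, data []byte) (etag string, err error), parts [][]byte, name string, size int64) error {
        var created MultiPartCreateResponse
        if err := post("/s3/multipart/create", MultiPartCreateRequest{Filename: name, Size: size}, &created); err != nil {
            return err
        }

        numbers := make([]int, len(parts))
        for i := range parts {
            numbers[i] = i + 1
        }
        var urls MultiPartGetURLsResponse
        if err := post("/s3/multipart/batch-sign-part-urls", MultiPartGetURLsRequest{UploadID: created.UploadID, Key: created.Key, PartNumbers: numbers}, &urls); err != nil {
            return err
        }

        // Assumes the signed URLs come back in the order they were asked for.
        completed := make([]CompletedPart, 0, len(parts))
        for i, u := range urls.URLs {
            etag, err := putPart(u.URL, parts[i])
            if err != nil {
                return err
            }
            completed = append(completed, CompletedPart{ETag: etag, PartNumber: u.PartNumber})
        }

        var done MultiPartCompleteResponse
        if err := post("/s3/multipart/complete", MultiPartCompleteRequest{UploadID: created.UploadID, Key: created.Key, Parts: completed}, &done); err != nil {
            return err
        }

        var entry MultiPartEntriesResponse
        return post("/s3/entries", MultiPartEntriesRequest{Filename: name, Size: size}, &entry)
    }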
1563
backend/drime/drime.go
Normal file
File diff suppressed because it is too large
33
backend/drime/drime_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
// Drime filesystem interface
|
||||
package drime
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestDrime:",
|
||||
NilObject: (*Object)(nil),
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
MinChunkSize: minChunkSize,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadChunkSize(cs)
|
||||
}
|
||||
|
||||
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
|
||||
return f.setUploadCutoff(cs)
|
||||
}
|
||||
|
||||
var (
|
||||
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
|
||||
_ fstests.SetUploadCutoffer = (*Fs)(nil)
|
||||
)
|
||||
@@ -733,17 +733,6 @@ two accounts.
|
||||
Advanced: true,
|
||||
Default: rwOff,
|
||||
Examples: rwExamples,
|
||||
}, {
|
||||
Name: "metadata_enforce_expansive_access",
|
||||
Help: `Whether the request should enforce expansive access rules.
|
||||
|
||||
From Feb 2026 this flag will be set by default so this flag can be used for
|
||||
testing before then.
|
||||
|
||||
See: https://developers.google.com/workspace/drive/api/guides/limited-expansive-access
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -823,7 +812,6 @@ type Options struct {
|
||||
MetadataOwner rwChoice `config:"metadata_owner"`
|
||||
MetadataPermissions rwChoice `config:"metadata_permissions"`
|
||||
MetadataLabels rwChoice `config:"metadata_labels"`
|
||||
EnforceExpansiveAccess bool `config:"metadata_enforce_expansive_access"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
EnvAuth bool `config:"env_auth"`
|
||||
}
|
||||
@@ -3104,7 +3092,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
_, err = f.svc.Permissions.Create(id, permission).
|
||||
Fields("").
|
||||
SupportsAllDrives(true).
|
||||
EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
@@ -149,7 +149,6 @@ func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions [
|
||||
_, err := f.svc.Permissions.Create(info.Id, perm).
|
||||
SupportsAllDrives(true).
|
||||
SendNotificationEmail(false).
|
||||
EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
@@ -484,7 +483,6 @@ func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err
|
||||
SupportsAllDrives(true).
|
||||
TransferOwnership(true).
|
||||
// SendNotificationEmail(false). - required apparently!
|
||||
EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
1178
backend/filen/filen.go
Normal file
File diff suppressed because it is too large
14
backend/filen/filen_test.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package filen
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestFilen:",
|
||||
NilObject: (*Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -204,6 +204,12 @@ Example:
|
||||
Help: `URL for HTTP CONNECT proxy
|
||||
|
||||
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
|
||||
|
||||
Supports the format http://user:pass@host:port, http://host:port, http://host.
|
||||
|
||||
Example:
|
||||
|
||||
http://myUser:myPass@proxyhostname.example.com:8000
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -892,7 +898,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
|
||||
resultchan := make(chan []*ftp.Entry, 1)
|
||||
errchan := make(chan error, 1)
|
||||
go func() {
|
||||
go func(c *ftp.ServerConn) {
|
||||
result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
@@ -900,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return
|
||||
}
|
||||
resultchan <- result
|
||||
}()
|
||||
}(c)
|
||||
|
||||
// Wait for List for up to Timeout seconds
|
||||
timer := time.NewTimer(f.ci.TimeoutOrInfinite())
|
||||
|
||||
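The List change above passes the FTP connection into the listing goroutine as an argument rather than capturing it from the enclosing scope, which makes the goroutine's ownership of that particular connection explicit even after List itself returns on timeout. The same pattern in miniature:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        conns := []string{"conn-1", "conn-2", "conn-3"}

        for _, c := range conns {
            wg.Add(1)
            // Handing the connection to the goroutine as a parameter pins the
            // value that goroutine works on, independent of the outer variable.
            go func(c string) {
                defer wg.Done()
                fmt.Println("listing on", c)
            }(c)
        }
        wg.Wait()
    }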
@@ -346,9 +346,26 @@ can't check the size and hash but the file contents will be decompressed.
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||
Name: "endpoint",
|
||||
Help: `Custom endpoint for the storage API. Leave blank to use the provider default.
|
||||
|
||||
When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint),
|
||||
the subpath will be ignored during upload operations due to a limitation in the
|
||||
underlying Google API Go client library.
|
||||
Download and listing operations will work correctly with the full endpoint path.
|
||||
If you require subpath support for uploads, avoid using subpaths in your custom
|
||||
endpoint configuration.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "storage.example.org",
|
||||
Help: "Specify a custom endpoint",
|
||||
}, {
|
||||
Value: "storage.example.org:4443",
|
||||
Help: "Specifying a custom endpoint with port",
|
||||
}, {
|
||||
Value: "storage.example.org:4443/gcs/api",
|
||||
Help: "Specifying a subpath, see the note, uploads won't use the custom path!",
|
||||
}},
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
|
||||
@@ -72,7 +72,7 @@ func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadPara
|
||||
|
||||
response := &UploadResult{}
|
||||
|
||||
formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
|
||||
formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName, "application/octet-stream")
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
|
||||
|
||||
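Several call sites in this set of changes gain a trailing argument to rest.MultipartUpload; from these diffs the new final parameter is the content type recorded for the file part (a fixed "application/octet-stream" here, the caller's own content type in the pcloud change). A hedged sketch of a call site using that shape; the surrounding Opts fields are illustrative only:

    package example

    import (
        "context"
        "fmt"
        "io"
        "net/url"

        "github.com/rclone/rclone/lib/rest"
    )

    // uploadForm shows the call shape with the trailing content type argument.
    func uploadForm(ctx context.Context, srv *rest.Client, in io.Reader, params url.Values, fileName string) error {
        formReader, contentType, _, err := rest.MultipartUpload(ctx, in, params, "file", fileName, "application/octet-stream")
        if err != nil {
            return fmt.Errorf("failed to make multipart upload: %w", err)
        }
        opts := rest.Opts{
            Method:      "POST",
            Body:        formReader,
            ContentType: contentType,
        }
        _, err = srv.Call(ctx, &opts)
        return err
    }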
@@ -17,12 +17,10 @@ Improvements:
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
@@ -256,25 +254,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
defer megaCacheMu.Unlock()
|
||||
srv := megaCache[opt.User]
|
||||
if srv == nil {
|
||||
// srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
|
||||
// Workaround for Mega's use of insecure cipher suites which are no longer supported by default since Go 1.22.
|
||||
// Relevant issues:
|
||||
// https://github.com/rclone/rclone/issues/8565
|
||||
// https://github.com/meganz/webclient/issues/103
|
||||
clt := fshttp.NewClient(ctx)
|
||||
clt.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
|
||||
var ids []uint16
|
||||
// Read default ciphers
|
||||
for _, cs := range tls.CipherSuites() {
|
||||
ids = append(ids, cs.ID)
|
||||
}
|
||||
// Insecure but Mega uses TLS_RSA_WITH_AES_128_GCM_SHA256 for storage endpoints
|
||||
// (e.g. https://gfs302n114.userstorage.mega.co.nz) as of June 18, 2025.
|
||||
t.TLSClientConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
|
||||
})
|
||||
srv = mega.New().SetClient(clt)
|
||||
|
||||
srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
|
||||
srv.SetHTTPS(opt.UseHTTPS)
|
||||
srv.SetLogger(func(format string, v ...any) {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
@@ -24,7 +25,8 @@ import (
|
||||
var (
|
||||
hashType = hash.MD5
|
||||
// the object storage is persistent
|
||||
buckets = newBucketsInfo()
|
||||
buckets = newBucketsInfo()
|
||||
errWriteOnly = errors.New("can't read when using --memory-discard")
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -33,12 +35,32 @@ func init() {
|
||||
Name: "memory",
|
||||
Description: "In memory object storage system.",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{},
|
||||
Options: []fs.Option{{
|
||||
Name: "discard",
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Help: `If set all writes will be discarded and reads will return an error
|
||||
|
||||
If set then when files are uploaded the contents will not be saved. The
|
||||
files will appear to have been uploaded but will give an error on
|
||||
read. Files will have their MD5 sum calculated on upload which takes
|
||||
very little CPU time and allows the transfers to be checked.
|
||||
|
||||
This can be useful for testing performance.
|
||||
|
||||
Probably most easily used via the connection string syntax:
|
||||
|
||||
:memory,discard:bucket
|
||||
|
||||
`,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct{}
|
||||
type Options struct {
|
||||
Discard bool `config:"discard"`
|
||||
}
|
||||
|
||||
// Fs represents a remote memory server
|
||||
type Fs struct {
|
||||
@@ -164,6 +186,7 @@ type objectData struct {
|
||||
hash string
|
||||
mimeType string
|
||||
data []byte
|
||||
size int64
|
||||
}
|
||||
|
||||
// Object describes a memory object
|
||||
@@ -558,7 +581,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hashType {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
if o.od.hash == "" {
|
||||
if o.od.hash == "" && !o.fs.opt.Discard {
|
||||
sum := md5.Sum(o.od.data)
|
||||
o.od.hash = hex.EncodeToString(sum[:])
|
||||
}
|
||||
@@ -567,7 +590,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return int64(len(o.od.data))
|
||||
return o.od.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
@@ -593,6 +616,9 @@ func (o *Object) Storable() bool {
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.fs.opt.Discard {
|
||||
return nil, errWriteOnly
|
||||
}
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
@@ -624,13 +650,24 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
data, err := io.ReadAll(in)
|
||||
var data []byte
|
||||
var size int64
|
||||
var hash string
|
||||
if o.fs.opt.Discard {
|
||||
h := md5.New()
|
||||
size, err = io.Copy(h, in)
|
||||
hash = hex.EncodeToString(h.Sum(nil))
|
||||
} else {
|
||||
data, err = io.ReadAll(in)
|
||||
size = int64(len(data))
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update memory object: %w", err)
|
||||
}
|
||||
o.od = &objectData{
|
||||
data: data,
|
||||
hash: "",
|
||||
size: size,
|
||||
hash: hash,
|
||||
modTime: src.ModTime(ctx),
|
||||
mimeType: fs.MimeType(ctx, src),
|
||||
}
|
||||
|
||||
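With the discard option set, the Update path above streams the body through an MD5 hasher instead of buffering it, so the object's size and hash stay correct while no data is kept. The core of that technique as a standalone program:

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        in := strings.NewReader("some object contents")

        // Hash the stream without keeping the bytes: io.Copy writes straight
        // into the hash state, so memory use stays constant.
        h := md5.New()
        size, err := io.Copy(h, in)
        if err != nil {
            panic(err)
        }
        fmt.Println(size, hex.EncodeToString(h.Sum(nil)))
    }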
@@ -403,7 +403,7 @@ This is why this flag is not set as the default.

As a rule of thumb if nearly all of your data is under rclone's root
directory (the |root/directory| in |onedrive:root/directory|) then
using this flag will be be a big performance win. If your data is
using this flag will be a big performance win. If your data is
mostly not under the root then using this flag will be a big
performance loss.

@@ -60,9 +60,6 @@ type StateChangeConf struct {
|
||||
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
|
||||
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
|
||||
|
||||
notfoundTick := 0
|
||||
targetOccurrence := 0
|
||||
|
||||
// Set a default for times to check for not found
|
||||
if conf.NotFoundChecks == 0 {
|
||||
conf.NotFoundChecks = 20
|
||||
@@ -84,9 +81,11 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
|
||||
// cancellation channel for the refresh loop
|
||||
cancelCh := make(chan struct{})
|
||||
|
||||
result := Result{}
|
||||
|
||||
go func() {
|
||||
notfoundTick := 0
|
||||
targetOccurrence := 0
|
||||
result := Result{}
|
||||
|
||||
defer close(resCh)
|
||||
|
||||
select {
|
||||
|
||||
@@ -222,3 +222,11 @@ type UserInfo struct {
|
||||
} `json:"steps"`
|
||||
} `json:"journey"`
|
||||
}
|
||||
|
||||
// DiffResult is the response from /diff
|
||||
type DiffResult struct {
|
||||
Result int `json:"result"`
|
||||
DiffID int64 `json:"diffid"`
|
||||
Entries []map[string]any `json:"entries"`
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
@@ -171,6 +171,7 @@ type Fs struct {
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
lastDiffID int64 // change tracking state for diff long-polling
|
||||
}
|
||||
|
||||
// Object describes a pcloud object
|
||||
@@ -1033,6 +1034,137 @@ func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ChangeNotify implements fs.Features.ChangeNotify
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
// Start long-poll loop in background
|
||||
go f.changeNotifyLoop(ctx, notify, ch)
|
||||
}
|
||||
|
||||
// changeNotifyLoop contains the blocking long-poll logic.
|
||||
func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
// Standard polling interval
|
||||
interval := 30 * time.Second
|
||||
|
||||
// Start with diffID = 0 to get the current state
|
||||
var diffID int64
|
||||
|
||||
// Helper to process changes from the diff API
|
||||
handleChanges := func(entries []map[string]any) {
|
||||
notifiedPaths := make(map[string]bool)
|
||||
|
||||
for _, entry := range entries {
|
||||
meta, ok := entry["metadata"].(map[string]any)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Robust extraction of ParentFolderID
|
||||
var pid int64
|
||||
if val, ok := meta["parentfolderid"]; ok {
|
||||
switch v := val.(type) {
|
||||
case float64:
|
||||
pid = int64(v)
|
||||
case int64:
|
||||
pid = v
|
||||
case int:
|
||||
pid = int64(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve the path using dirCache.GetInv
|
||||
// pCloud uses "d" prefix for directory IDs in cache, but API returns numbers
|
||||
dirID := fmt.Sprintf("d%d", pid)
|
||||
parentPath, ok := f.dirCache.GetInv(dirID)
|
||||
|
||||
if !ok {
|
||||
// Parent not in cache, so we can ignore this change as it is outside
|
||||
// of what the mount has seen or cares about.
|
||||
continue
|
||||
}
|
||||
|
||||
name, _ := meta["name"].(string)
|
||||
fullPath := path.Join(parentPath, name)
|
||||
|
||||
// Determine EntryType (File or Directory)
|
||||
entryType := fs.EntryObject
|
||||
if isFolder, ok := meta["isfolder"].(bool); ok && isFolder {
|
||||
entryType = fs.EntryDirectory
|
||||
}
|
||||
|
||||
// Deduplicate notifications for this batch
|
||||
if !notifiedPaths[fullPath] {
|
||||
fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType)
|
||||
notify(fullPath, entryType)
|
||||
notifiedPaths[fullPath] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
// Check context and channel
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case newInterval, ok := <-ch:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
interval = newInterval
|
||||
default:
|
||||
}
|
||||
|
||||
// Setup /diff Request
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/diff",
|
||||
Parameters: url.Values{},
|
||||
}
|
||||
|
||||
if diffID != 0 {
|
||||
opts.Parameters.Set("diffid", strconv.FormatInt(diffID, 10))
|
||||
opts.Parameters.Set("block", "1")
|
||||
} else {
|
||||
opts.Parameters.Set("last", "0")
|
||||
}
|
||||
|
||||
// Perform Long-Poll
|
||||
// Timeout set to 90s (server usually blocks for 60s max)
|
||||
reqCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
|
||||
var result api.DiffResult
|
||||
|
||||
_, err := f.srv.CallJSON(reqCtx, &opts, nil, &result)
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return
|
||||
}
|
||||
// Ignore timeout errors as they are normal for long-polling
|
||||
if !errors.Is(err, context.DeadlineExceeded) {
|
||||
fs.Infof(f, "ChangeNotify: polling error: %v. Waiting %v.", err, interval)
|
||||
time.Sleep(interval)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// If result is not 0, reset DiffID to resync
|
||||
if result.Result != 0 {
|
||||
diffID = 0
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
if result.DiffID != 0 {
|
||||
diffID = result.DiffID
|
||||
f.lastDiffID = diffID
|
||||
}
|
||||
|
||||
if len(result.Entries) > 0 {
|
||||
handleChanges(result.Entries)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
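Two details of changeNotifyLoop above are easy to miss: the /diff request is a long poll (the first call with last=0 only learns the current diffid, later calls block until something changes), and because entries are decoded into map[string]any, numeric fields such as parentfolderid arrive as float64, which is why the type switch is needed. A small demonstration of the decoding behaviour:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        var entry map[string]any
        _ = json.Unmarshal([]byte(`{"parentfolderid": 12345, "isfolder": true}`), &entry)

        // Without a concrete struct, JSON numbers decode to float64.
        v := entry["parentfolderid"]
        fmt.Printf("%T\n", v) // float64
        pid := int64(v.(float64))
        fmt.Println(pid) // 12345
    }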
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
// EU region supports SHA1 and SHA256 (but rclone doesn't
|
||||
@@ -1327,7 +1459,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// opts.Body=0), so upload it as a multipart form POST with
|
||||
// Content-Length set.
|
||||
if size == 0 {
|
||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
|
||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf, opts.ContentType)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
|
||||
}
|
||||
@@ -1401,6 +1533,7 @@ var (
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -1384,7 +1384,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
|
||||
for i := range iVal.NumField() {
|
||||
params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
|
||||
}
|
||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
|
||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name, "application/octet-stream")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to make multipart upload: %w", err)
|
||||
}
|
||||
|
||||
@@ -10,8 +10,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
|
||||
"github.com/henrybear327/go-proton-api"
|
||||
protonDriveAPI "github.com/rclone/Proton-API-Bridge"
|
||||
"github.com/rclone/go-proton-api"
|
||||
|
||||
"github.com/pquerna/otp/totp"
|
||||
|
||||
|
||||
15
backend/s3/provider/BizflyCloud.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
name: BizflyCloud
|
||||
description: Bizfly Cloud Simple Storage
|
||||
region:
|
||||
hn: Ha Noi
|
||||
hcm: Ho Chi Minh
|
||||
endpoint:
|
||||
hn.ss.bfcplatform.vn: Hanoi endpoint
|
||||
hcm.ss.bfcplatform.vn: Ho Chi Minh endpoint
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
@@ -1,26 +1,26 @@
|
||||
name: Linode
|
||||
description: Linode Object Storage
|
||||
endpoint:
|
||||
nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
|
||||
us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
|
||||
in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
|
||||
us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
|
||||
eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
|
||||
id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
|
||||
gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
|
||||
us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
|
||||
es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
|
||||
au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
|
||||
us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
|
||||
it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
|
||||
us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
|
||||
jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
|
||||
fr-par-1.linodeobjects.com: Paris (France), fr-par-1
|
||||
br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
|
||||
us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
|
||||
ap-south-1.linodeobjects.com: Singapore, ap-south-1
|
||||
sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
|
||||
se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
|
||||
us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1
|
||||
nl-ams-1.linodeobjects.com: Amsterdam, NL (nl-ams-1)
|
||||
us-southeast-1.linodeobjects.com: Atlanta, GA, US (us-southeast-1)
|
||||
in-maa-1.linodeobjects.com: Chennai, IN (in-maa-1)
|
||||
us-ord-1.linodeobjects.com: Chicago, IL, US (us-ord-1)
|
||||
eu-central-1.linodeobjects.com: Frankfurt, DE (eu-central-1)
|
||||
id-cgk-1.linodeobjects.com: Jakarta, ID (id-cgk-1)
|
||||
gb-lon-1.linodeobjects.com: London 2, UK (gb-lon-1)
|
||||
us-lax-1.linodeobjects.com: Los Angeles, CA, US (us-lax-1)
|
||||
es-mad-1.linodeobjects.com: Madrid, ES (es-mad-1)
|
||||
us-mia-1.linodeobjects.com: Miami, FL, US (us-mia-1)
|
||||
it-mil-1.linodeobjects.com: Milan, IT (it-mil-1)
|
||||
us-east-1.linodeobjects.com: Newark, NJ, US (us-east-1)
|
||||
jp-osa-1.linodeobjects.com: Osaka, JP (jp-osa-1)
|
||||
fr-par-1.linodeobjects.com: Paris, FR (fr-par-1)
|
||||
br-gru-1.linodeobjects.com: Sao Paulo, BR (br-gru-1)
|
||||
us-sea-1.linodeobjects.com: Seattle, WA, US (us-sea-1)
|
||||
ap-south-1.linodeobjects.com: Singapore, SG (ap-south-1)
|
||||
sg-sin-1.linodeobjects.com: Singapore 2, SG (sg-sin-1)
|
||||
se-sto-1.linodeobjects.com: Stockholm, SE (se-sto-1)
|
||||
jp-tyo-1.linodeobjects.com: Tokyo 3, JP (jp-tyo-1)
|
||||
us-iad-10.linodeobjects.com: Washington, DC, US (us-iad-10)
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
|
||||
@@ -2,7 +2,17 @@ name: Selectel
|
||||
description: Selectel Object Storage
|
||||
region:
|
||||
ru-1: St. Petersburg
|
||||
ru-3: St. Petersburg
|
||||
ru-7: Moscow
|
||||
gis-1: Moscow
|
||||
kz-1: Kazakhstan
|
||||
uz-2: Uzbekistan
|
||||
endpoint:
|
||||
s3.ru-1.storage.selcloud.ru: Saint Petersburg
|
||||
s3.ru-1.storage.selcloud.ru: St. Petersburg
|
||||
s3.ru-3.storage.selcloud.ru: St. Petersburg
|
||||
s3.ru-7.storage.selcloud.ru: Moscow
|
||||
s3.gis-1.storage.selcloud.ru: Moscow
|
||||
s3.kz-1.storage.selcloud.ru: Kazakhstan
|
||||
s3.uz-2.storage.selcloud.ru: Uzbekistan
|
||||
quirks:
|
||||
list_url_encode: false
|
||||
|
||||
@@ -30,9 +30,11 @@ import (
|
||||
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
|
||||
awsconfig "github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/aws/aws-sdk-go-v2/service/sts"
|
||||
"github.com/aws/smithy-go"
|
||||
"github.com/aws/smithy-go/logging"
|
||||
"github.com/aws/smithy-go/middleware"
|
||||
@@ -325,6 +327,30 @@ If empty it will default to the environment variable "AWS_PROFILE" or
|
||||
Help: "An AWS session token.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "role_arn",
|
||||
Help: `ARN of the IAM role to assume.
|
||||
|
||||
Leave blank if not using assume role.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "role_session_name",
|
||||
Help: `Session name for assumed role.
|
||||
|
||||
If empty, a session name will be generated automatically.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "role_session_duration",
|
||||
Help: `Session duration for assumed role.
|
||||
|
||||
If empty, the default session duration will be used.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "role_external_id",
|
||||
Help: `External ID for assumed role.
|
||||
|
||||
Leave blank if not using an external ID.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads and copies.
|
||||
@@ -927,6 +953,10 @@ type Options struct {
|
||||
SharedCredentialsFile string `config:"shared_credentials_file"`
|
||||
Profile string `config:"profile"`
|
||||
SessionToken string `config:"session_token"`
|
||||
RoleARN string `config:"role_arn"`
|
||||
RoleSessionName string `config:"role_session_name"`
|
||||
RoleSessionDuration fs.Duration `config:"role_session_duration"`
|
||||
RoleExternalID string `config:"role_external_id"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
ForcePathStyle bool `config:"force_path_style"`
|
||||
V2Auth bool `config:"v2_auth"`
|
||||
@@ -1290,6 +1320,34 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
opt.Region = "us-east-1"
|
||||
}
|
||||
|
||||
// Handle assume role if RoleARN is specified
|
||||
if opt.RoleARN != "" {
|
||||
fs.Debugf(nil, "Using assume role with ARN: %s", opt.RoleARN)
|
||||
|
||||
// Set region for the config before creating STS client
|
||||
awsConfig.Region = opt.Region
|
||||
|
||||
// Create STS client using the base credentials
|
||||
stsClient := sts.NewFromConfig(awsConfig)
|
||||
|
||||
// Configure AssumeRole options
|
||||
assumeRoleOptions := func(aro *stscreds.AssumeRoleOptions) {
|
||||
// Set session name if provided, otherwise use a default
|
||||
if opt.RoleSessionName != "" {
|
||||
aro.RoleSessionName = opt.RoleSessionName
|
||||
}
|
||||
if opt.RoleSessionDuration != 0 {
|
||||
aro.Duration = time.Duration(opt.RoleSessionDuration)
|
||||
}
|
||||
if opt.RoleExternalID != "" {
|
||||
aro.ExternalID = &opt.RoleExternalID
|
||||
}
|
||||
}
|
||||
|
||||
// Create AssumeRole credentials provider
|
||||
awsConfig.Credentials = stscreds.NewAssumeRoleProvider(stsClient, opt.RoleARN, assumeRoleOptions)
|
||||
}
|
||||
|
||||
provider = loadProvider(opt.Provider)
|
||||
if provider == nil {
|
||||
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
|
||||
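The assume-role support above swaps the credentials provider on the AWS config for one backed by STS: the base credentials are only used to call AssumeRole, and every S3 request after that uses the temporary role credentials. A minimal sketch of the same wiring outside rclone; the region, role ARN and session options are placeholders:

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/aws-sdk-go-v2/service/sts"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
        if err != nil {
            log.Fatal(err)
        }

        // STS client built from the base credentials.
        stsClient := sts.NewFromConfig(cfg)

        // Swap the credentials provider for one that assumes the role.
        cfg.Credentials = stscreds.NewAssumeRoleProvider(stsClient, "arn:aws:iam::123456789012:role/example", func(o *stscreds.AssumeRoleOptions) {
            o.RoleSessionName = "rclone-session" // placeholder session name
            o.Duration = time.Hour
        })

        client := s3.NewFromConfig(cfg)
        _ = client
    }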
@@ -1648,11 +1706,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
newRoot, leaf := path.Split(oldRoot)
|
||||
f.setRoot(newRoot)
|
||||
_, err := f.NewObject(ctx, leaf)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrorObjectNotFound) {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
f.setRoot(oldRoot)
|
||||
return f, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
@@ -2832,6 +2893,8 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
|
||||
SSECustomerKey: req.SSECustomerKey,
|
||||
SSECustomerKeyMD5: req.SSECustomerKeyMD5,
|
||||
UploadId: uid,
|
||||
IfMatch: copyReq.IfMatch,
|
||||
IfNoneMatch: copyReq.IfNoneMatch,
|
||||
})
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
@@ -2865,14 +2928,23 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
req := s3.CopyObjectInput{
|
||||
MetadataDirective: types.MetadataDirectiveCopy,
|
||||
}
|
||||
if srcObj.storageClass != nil {
|
||||
req.StorageClass = types.StorageClass(*srcObj.storageClass)
|
||||
}
|
||||
// Build upload options including headers and metadata
|
||||
ci := fs.GetConfig(ctx)
|
||||
uploadOptions := fs.MetadataAsOpenOptions(ctx)
|
||||
for _, option := range ci.UploadHeaders {
|
||||
uploadOptions = append(uploadOptions, option)
|
||||
}
|
||||
|
||||
// Update the metadata if it is in use
|
||||
if ci := fs.GetConfig(ctx); ci.Metadata {
|
||||
ui, err := srcObj.prepareUpload(ctx, src, fs.MetadataAsOpenOptions(ctx), true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req)
|
||||
ui, err := srcObj.prepareUpload(ctx, src, uploadOptions, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
|
||||
setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req)
|
||||
if ci.Metadata {
|
||||
req.MetadataDirective = types.MetadataDirectiveReplace
|
||||
}
|
||||
|
||||
@@ -4281,6 +4353,8 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
|
||||
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
|
||||
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
|
||||
UploadId: w.uploadID,
|
||||
IfMatch: w.ui.req.IfMatch,
|
||||
IfNoneMatch: w.ui.req.IfNoneMatch,
|
||||
})
|
||||
return w.f.shouldRetry(ctx, err)
|
||||
})
|
||||
@@ -4429,7 +4503,12 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
ACL: types.ObjectCannedACL(o.fs.opt.ACL),
|
||||
Key: &bucketPath,
|
||||
}
|
||||
|
||||
if tierObj, ok := src.(fs.GetTierer); ok {
|
||||
tier := tierObj.GetTier()
|
||||
if tier != "" {
|
||||
ui.req.StorageClass = types.StorageClass(strings.ToUpper(tier))
|
||||
}
|
||||
}
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err != nil {
|
||||
|
||||
@@ -70,6 +70,7 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV
|
||||
// setFrom_typesObject_typesObjectVersion copies matching elements from a to b
|
||||
func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVersion) {
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ChecksumType = b.ChecksumType
|
||||
a.ETag = b.ETag
|
||||
a.Key = b.Key
|
||||
a.LastModified = b.LastModified
|
||||
@@ -82,6 +83,7 @@ func setFrom_typesObject_typesObjectVersion(a *types.Object, b *types.ObjectVers
|
||||
func setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(a *s3.CreateMultipartUploadInput, b *s3.HeadObjectOutput) {
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumType = b.ChecksumType
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
@@ -160,12 +162,15 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumCRC32 = b.ChecksumCRC32
|
||||
a.ChecksumCRC32C = b.ChecksumCRC32C
|
||||
a.ChecksumCRC64NVME = b.ChecksumCRC64NVME
|
||||
a.ChecksumSHA1 = b.ChecksumSHA1
|
||||
a.ChecksumSHA256 = b.ChecksumSHA256
|
||||
a.ChecksumType = b.ChecksumType
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentLength = b.ContentLength
|
||||
a.ContentRange = b.ContentRange
|
||||
a.ContentType = b.ContentType
|
||||
a.DeleteMarker = b.DeleteMarker
|
||||
a.ETag = b.ETag
|
||||
@@ -187,6 +192,7 @@ func setFrom_s3HeadObjectOutput_s3GetObjectOutput(a *s3.HeadObjectOutput, b *s3.
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.TagCount = b.TagCount
|
||||
a.VersionId = b.VersionId
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
a.ResultMetadata = b.ResultMetadata
|
||||
@@ -232,6 +238,7 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumCRC32 = b.ChecksumCRC32
|
||||
a.ChecksumCRC32C = b.ChecksumCRC32C
|
||||
a.ChecksumCRC64NVME = b.ChecksumCRC64NVME
|
||||
a.ChecksumSHA1 = b.ChecksumSHA1
|
||||
a.ChecksumSHA256 = b.ChecksumSHA256
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
@@ -270,6 +277,8 @@ func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.Put
|
||||
a.GrantRead = b.GrantRead
|
||||
a.GrantReadACP = b.GrantReadACP
|
||||
a.GrantWriteACP = b.GrantWriteACP
|
||||
a.IfMatch = b.IfMatch
|
||||
a.IfNoneMatch = b.IfNoneMatch
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
|
||||
@@ -688,7 +688,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
|
||||
"need_idx_progress": {"true"},
|
||||
"replace": {"1"},
|
||||
}
|
||||
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
|
||||
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename), "application/octet-stream")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make multipart upload: %w", err)
|
||||
}
|
||||
|
||||
@@ -519,6 +519,12 @@ Example:
|
||||
Help: `URL for HTTP CONNECT proxy
|
||||
|
||||
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
|
||||
|
||||
Supports the format http://user:pass@host:port, http://host:port, http://host.
|
||||
|
||||
Example:
|
||||
|
||||
http://myUser:myPass@proxyhostname.example.com:8000
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
@@ -919,15 +925,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
opt.Port = "22"
|
||||
}
|
||||
|
||||
// get proxy URL if set
|
||||
if opt.HTTPProxy != "" {
|
||||
proxyURL, err := url.Parse(opt.HTTPProxy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
|
||||
}
|
||||
f.proxyURL = proxyURL
|
||||
}
|
||||
|
||||
// Set up sshConfig here from opt
|
||||
// **NB** everything else should be setup in NewFsWithConnection
|
||||
sshConfig := &ssh.ClientConfig{
|
||||
User: opt.User,
|
||||
Auth: []ssh.AuthMethod{},
|
||||
@@ -1175,11 +1174,21 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
f.mkdirLock = newStringLock()
|
||||
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
|
||||
f.savedpswd = ""
|
||||
|
||||
// set the pool drainer timer going
|
||||
if f.opt.IdleTimeout > 0 {
|
||||
f.drain = time.AfterFunc(time.Duration(f.opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
|
||||
}
|
||||
|
||||
// get proxy URL if set
|
||||
if opt.HTTPProxy != "" {
|
||||
proxyURL, err := url.Parse(opt.HTTPProxy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
|
||||
}
|
||||
f.proxyURL = proxyURL
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
SlowHash: true,
|
||||
@@ -1249,7 +1258,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
fs.Debugf(f, "Failed to resolve path using RealPath: %v", err)
|
||||
cwd, err := c.sftpClient.Getwd()
|
||||
if err != nil {
|
||||
fs.Debugf(f, "Failed to to read current directory - using relative paths: %v", err)
|
||||
fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
|
||||
} else {
|
||||
f.absRoot = path.Join(cwd, f.root)
|
||||
fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot)
|
||||
|
||||
27
backend/shade/api/types.go
Normal file
@@ -0,0 +1,27 @@
|
||||
// Package api has type definitions for shade
|
||||
package api
|
||||
|
||||
// ListDirResponse -------------------------------------------------
|
||||
// Format from shade api
|
||||
type ListDirResponse struct {
|
||||
Type string `json:"type"` // "file" or "tree"
|
||||
Path string `json:"path"` // Full path including root
|
||||
Ino int `json:"ino"` // inode number
|
||||
Mtime int64 `json:"mtime"` // Modified time in milliseconds
|
||||
Ctime int64 `json:"ctime"` // Created time in milliseconds
|
||||
Size int64 `json:"size"` // Size in bytes
|
||||
Hash string `json:"hash"` // MD5 hash
|
||||
Draft bool `json:"draft"` // Whether this is a draft file
|
||||
}
|
||||
|
||||
// PartURL Type for multipart upload/download
|
||||
type PartURL struct {
|
||||
URL string `json:"url"`
|
||||
Headers map[string]string `json:"headers,omitempty"`
|
||||
}
|
||||
|
||||
// CompletedPart Type for completed parts when making a multipart upload.
|
||||
type CompletedPart struct {
|
||||
ETag string
|
||||
PartNumber int32
|
||||
}
|
||||
1039
backend/shade/shade.go
Normal file
File diff suppressed because it is too large
21
backend/shade/shade_test.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package shade_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/shade"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
name := "TestShade"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*shade.Object)(nil),
|
||||
SkipInvalidUTF8: true,
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
|
||||
},
|
||||
})
|
||||
}
|
||||
336
backend/shade/upload.go
Normal file
@@ -0,0 +1,336 @@
|
||||
// Multipart upload for shade
|
||||
|
||||
package shade
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/rclone/rclone/backend/shade/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
"github.com/rclone/rclone/lib/multipart"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
type shadeChunkWriter struct {
|
||||
initToken string
|
||||
chunkSize int64
|
||||
size int64
|
||||
f *Fs
|
||||
o *Object
|
||||
completedParts []api.CompletedPart
|
||||
completedPartsMu sync.Mutex
|
||||
}
|
||||
|
||||
// uploadMultipart handles multipart upload for larger files
|
||||
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
|
||||
|
||||
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
|
||||
Open: o.fs,
|
||||
OpenOptions: options,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var shadeWriter = chunkWriter.(*shadeChunkWriter)
|
||||
o.size = shadeWriter.size
|
||||
return nil
|
||||
}
|
||||
|
||||
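uploadMultipart above hands the chunking loop to lib/multipart, which only needs the backend to return something satisfying rclone's chunk writer contract from OpenChunkWriter: write numbered chunks, then Close on success or Abort on failure. A sketch of that contract's shape, mirroring the methods shadeChunkWriter implements below (not the literal fs package definition):

    package shade

    import (
        "context"
        "io"
    )

    // chunkWriter mirrors the methods lib/multipart drives on the value
    // returned by OpenChunkWriter; shadeChunkWriter below satisfies it.
    type chunkWriter interface {
        WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error)
        Close(ctx context.Context) error
        Abort(ctx context.Context) error
    }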
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
uploadParts := f.opt.MaxUploadParts
|
||||
if uploadParts < 1 {
|
||||
uploadParts = 1
|
||||
} else if uploadParts > maxUploadParts {
|
||||
uploadParts = maxUploadParts
|
||||
}
|
||||
size := src.Size()
|
||||
fs.FixRangeOption(options, size)
|
||||
|
||||
// calculate size of parts
|
||||
chunkSize := f.opt.ChunkSize
|
||||
|
||||
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
|
||||
// buffers here (default 64 MB). With a maximum number of parts (10,000) this will be a file of
|
||||
// 640 GB.
|
||||
if size == -1 {
|
||||
warnStreamUpload.Do(func() {
|
||||
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
|
||||
chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
|
||||
})
|
||||
} else {
|
||||
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
|
||||
}
|
||||
|
||||
token, err := o.fs.refreshJWTToken(ctx)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("failed to get token: %w", err)
|
||||
}
|
||||
|
||||
err = f.ensureParentDirectories(ctx, remote)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("failed to ensure parent directories: %w", err)
|
||||
}
|
||||
|
||||
fullPath := remote
|
||||
if f.root != "" {
|
||||
fullPath = path.Join(f.root, remote)
|
||||
}
|
||||
|
||||
// Initiate multipart upload
|
||||
type initRequest struct {
|
||||
Path string `json:"path"`
|
||||
PartSize int64 `json:"partSize"`
|
||||
}
|
||||
reqBody := initRequest{
|
||||
Path: fullPath,
|
||||
PartSize: int64(chunkSize),
|
||||
}
|
||||
|
||||
var initResp struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: fmt.Sprintf("/%s/upload/multipart", o.fs.drive),
|
||||
RootURL: o.fs.endpoint,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": "Bearer " + token,
|
||||
},
|
||||
Options: options,
|
||||
}
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
res, err := o.fs.srv.CallJSON(ctx, &opts, reqBody, &initResp)
|
||||
if err != nil {
|
||||
return res != nil && res.StatusCode == http.StatusTooManyRequests, err
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
chunkWriter := &shadeChunkWriter{
|
||||
initToken: initResp.Token,
|
||||
chunkSize: int64(chunkSize),
|
||||
size: size,
|
||||
f: f,
|
||||
o: o,
|
||||
}
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(chunkSize),
|
||||
Concurrency: f.opt.Concurrency,
|
||||
LeavePartsOnError: false,
|
||||
}
|
||||
return info, chunkWriter, err
|
||||
}
|
||||
|
||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
||||
func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
|
||||
|
||||
token, err := s.f.refreshJWTToken(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Read chunk
|
||||
var chunk bytes.Buffer
|
||||
n, err := io.Copy(&chunk, reader)
|
||||
|
||||
if n == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to read chunk: %w", err)
|
||||
}
|
||||
// Get presigned URL for this part
|
||||
var partURL api.PartURL
|
||||
|
||||
partOpts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: fmt.Sprintf("/%s/upload/multipart/part/%d?token=%s", s.f.drive, chunkNumber+1, url.QueryEscape(s.initToken)),
|
||||
RootURL: s.f.endpoint,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": "Bearer " + token,
|
||||
},
|
||||
}
|
||||
|
||||
err = s.f.pacer.Call(func() (bool, error) {
|
||||
res, err := s.f.srv.CallJSON(ctx, &partOpts, nil, &partURL)
|
||||
if err != nil {
|
||||
return res != nil && res.StatusCode == http.StatusTooManyRequests, err
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get part URL: %w", err)
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: partURL.URL,
|
||||
Body: &chunk,
|
||||
ContentType: "",
|
||||
ContentLength: &n,
|
||||
}
|
||||
|
||||
// Add headers
|
||||
var uploadRes *http.Response
|
||||
if len(partURL.Headers) > 0 {
|
||||
opts.ExtraHeaders = make(map[string]string)
|
||||
for k, v := range partURL.Headers {
|
||||
opts.ExtraHeaders[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
err = s.f.pacer.Call(func() (bool, error) {
|
||||
uploadRes, err = s.f.srv.Call(ctx, &opts)
|
||||
if err != nil {
|
||||
return uploadRes != nil && uploadRes.StatusCode == http.StatusTooManyRequests, err
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to upload part %d: %w", chunk, err)
|
||||
}
|
||||
|
||||
if uploadRes.StatusCode != http.StatusOK && uploadRes.StatusCode != http.StatusCreated {
|
||||
body, _ := io.ReadAll(uploadRes.Body)
|
||||
fs.CheckClose(uploadRes.Body, &err)
|
||||
return 0, fmt.Errorf("part upload failed with status %d: %s", uploadRes.StatusCode, string(body))
|
||||
}
|
||||
|
||||
// Get ETag from response
|
||||
etag := uploadRes.Header.Get("ETag")
|
||||
fs.CheckClose(uploadRes.Body, &err)
|
||||
|
||||
s.completedPartsMu.Lock()
|
||||
defer s.completedPartsMu.Unlock()
|
||||
s.completedParts = append(s.completedParts, api.CompletedPart{
|
||||
PartNumber: int32(chunkNumber + 1),
|
||||
ETag: etag,
|
||||
})
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Close complete chunked writer finalising the file.
|
||||
func (s *shadeChunkWriter) Close(ctx context.Context) error {
|
||||
|
||||
// Complete multipart upload
|
||||
sort.Slice(s.completedParts, func(i, j int) bool {
|
||||
return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber
|
||||
})
|
||||
|
||||
type completeRequest struct {
|
||||
Parts []api.CompletedPart `json:"parts"`
|
||||
}
|
||||
var completeBody completeRequest
|
||||
|
||||
if s.completedParts == nil {
|
||||
completeBody = completeRequest{Parts: []api.CompletedPart{}}
|
||||
} else {
|
||||
completeBody = completeRequest{Parts: s.completedParts}
|
||||
}
|
||||
|
||||
token, err := s.f.refreshJWTToken(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
completeOpts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: fmt.Sprintf("/%s/upload/multipart/complete?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
|
||||
RootURL: s.f.endpoint,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": "Bearer " + token,
|
||||
},
|
||||
}
|
||||
|
||||
var response http.Response
|
||||
|
||||
err = s.f.pacer.Call(func() (bool, error) {
|
||||
res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response)
|
||||
|
||||
if err != nil && res == nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if res.StatusCode == http.StatusTooManyRequests {
|
||||
return true, err // Retry on 429
|
||||
}
|
||||
|
||||
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
|
||||
body, _ := io.ReadAll(res.Body)
|
||||
return false, fmt.Errorf("complete multipart failed with status %d: %s", res.StatusCode, string(body))
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to complete multipart upload: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Abort chunk write
|
||||
//
|
||||
// You can and should call Abort without calling Close.
|
||||
func (s *shadeChunkWriter) Abort(ctx context.Context) error {
|
||||
token, err := s.f.refreshJWTToken(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: fmt.Sprintf("/%s/upload/abort/multipart?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
|
||||
RootURL: s.f.endpoint,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": "Bearer " + token,
|
||||
},
|
||||
}
|
||||
|
||||
err = s.f.pacer.Call(func() (bool, error) {
|
||||
res, err := s.f.srv.Call(ctx, &opts)
|
||||
if err != nil {
|
||||
fs.Debugf(s.f, "Failed to abort multipart upload: %v", err)
|
||||
return false, nil // Don't retry abort
|
||||
}
|
||||
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
|
||||
fs.Debugf(s.f, "Abort returned status %d", res.StatusCode)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to abort multipart upload: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -54,7 +54,7 @@ var SharedOptions = []fs.Option{{
|
||||
Name: "chunk_size",
|
||||
Help: strings.ReplaceAll(`Above this size files will be chunked.
|
||||
|
||||
Above this size files will be chunked into a a |`+segmentsContainerSuffix+`| container
|
||||
Above this size files will be chunked into a |`+segmentsContainerSuffix+`| container
|
||||
or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option
|
||||
for more info). Default for this is 5 GiB which is its maximum value, which
|
||||
means only files above this size will be chunked.
|
||||
|
||||
@@ -1,171 +0,0 @@
|
||||
// Package api provides types used by the Uptobox API.
|
||||
package api
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Error contains the error code and message returned by the API
|
||||
type Error struct {
|
||||
Success bool `json:"success,omitempty"`
|
||||
StatusCode int `json:"statusCode,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Data string `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e Error) Error() string {
|
||||
out := fmt.Sprintf("api error %d", e.StatusCode)
|
||||
if e.Message != "" {
|
||||
out += ": " + e.Message
|
||||
}
|
||||
if e.Data != "" {
|
||||
out += ": " + e.Data
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// FolderEntry represents a Uptobox subfolder when listing folder contents
|
||||
type FolderEntry struct {
|
||||
FolderID uint64 `json:"fld_id"`
|
||||
Description string `json:"fld_descr"`
|
||||
Password string `json:"fld_password"`
|
||||
FullPath string `json:"fullPath"`
|
||||
Path string `json:"fld_name"`
|
||||
Name string `json:"name"`
|
||||
Hash string `json:"hash"`
|
||||
}
|
||||
|
||||
// FolderInfo represents the current folder when listing folder contents
|
||||
type FolderInfo struct {
|
||||
FolderID uint64 `json:"fld_id"`
|
||||
Hash string `json:"hash"`
|
||||
FileCount uint64 `json:"fileCount"`
|
||||
TotalFileSize int64 `json:"totalFileSize"`
|
||||
}
|
||||
|
||||
// FileInfo represents a file when listing folder contents
|
||||
type FileInfo struct {
|
||||
Name string `json:"file_name"`
|
||||
Description string `json:"file_descr"`
|
||||
Created string `json:"file_created"`
|
||||
Size int64 `json:"file_size"`
|
||||
Downloads uint64 `json:"file_downloads"`
|
||||
Code string `json:"file_code"`
|
||||
Password string `json:"file_password"`
|
||||
Public int `json:"file_public"`
|
||||
LastDownload string `json:"file_last_download"`
|
||||
ID uint64 `json:"id"`
|
||||
}
|
||||
|
||||
// ReadMetadataResponse is the response when listing folder contents
|
||||
type ReadMetadataResponse struct {
|
||||
StatusCode int `json:"statusCode"`
|
||||
Message string `json:"message"`
|
||||
Data struct {
|
||||
CurrentFolder FolderInfo `json:"currentFolder"`
|
||||
Folders []FolderEntry `json:"folders"`
|
||||
Files []FileInfo `json:"files"`
|
||||
PageCount int `json:"pageCount"`
|
||||
TotalFileCount int `json:"totalFileCount"`
|
||||
TotalFileSize int64 `json:"totalFileSize"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// UploadInfo is the response when initiating an upload
|
||||
type UploadInfo struct {
|
||||
StatusCode int `json:"statusCode"`
|
||||
Message string `json:"message"`
|
||||
Data struct {
|
||||
UploadLink string `json:"uploadLink"`
|
||||
MaxUpload string `json:"maxUpload"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// UploadResponse is the response to a successful upload
|
||||
type UploadResponse struct {
|
||||
Files []struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
URL string `json:"url"`
|
||||
DeleteURL string `json:"deleteUrl"`
|
||||
} `json:"files"`
|
||||
}
|
||||
|
||||
// UpdateResponse is a generic response to various action on files (rename/copy/move)
|
||||
type UpdateResponse struct {
|
||||
Message string `json:"message"`
|
||||
StatusCode int `json:"statusCode"`
|
||||
}
|
||||
|
||||
// Download is the response when requesting a download link
|
||||
type Download struct {
|
||||
StatusCode int `json:"statusCode"`
|
||||
Message string `json:"message"`
|
||||
Data struct {
|
||||
DownloadLink string `json:"dlLink"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// MetadataRequestOptions represents all the options when listing folder contents
|
||||
type MetadataRequestOptions struct {
|
||||
Limit uint64
|
||||
Offset uint64
|
||||
SearchField string
|
||||
Search string
|
||||
}
|
||||
|
||||
// CreateFolderRequest is used for creating a folder
|
||||
type CreateFolderRequest struct {
|
||||
Token string `json:"token"`
|
||||
Path string `json:"path"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// DeleteFolderRequest is used for deleting a folder
|
||||
type DeleteFolderRequest struct {
|
||||
Token string `json:"token"`
|
||||
FolderID uint64 `json:"fld_id"`
|
||||
}
|
||||
|
||||
// CopyMoveFileRequest is used for moving/copying a file
|
||||
type CopyMoveFileRequest struct {
|
||||
Token string `json:"token"`
|
||||
FileCodes string `json:"file_codes"`
|
||||
DestinationFolderID uint64 `json:"destination_fld_id"`
|
||||
Action string `json:"action"`
|
||||
}
|
||||
|
||||
// MoveFolderRequest is used for moving a folder
|
||||
type MoveFolderRequest struct {
|
||||
Token string `json:"token"`
|
||||
FolderID uint64 `json:"fld_id"`
|
||||
DestinationFolderID uint64 `json:"destination_fld_id"`
|
||||
Action string `json:"action"`
|
||||
}
|
||||
|
||||
// RenameFolderRequest is used for renaming a folder
|
||||
type RenameFolderRequest struct {
|
||||
Token string `json:"token"`
|
||||
FolderID uint64 `json:"fld_id"`
|
||||
NewName string `json:"new_name"`
|
||||
}
|
||||
|
||||
// UpdateFileInformation is used for renaming a file
|
||||
type UpdateFileInformation struct {
|
||||
Token string `json:"token"`
|
||||
FileCode string `json:"file_code"`
|
||||
NewName string `json:"new_name,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Public string `json:"public,omitempty"`
|
||||
}
|
||||
|
||||
// RemoveFileRequest is used for deleting a file
|
||||
type RemoveFileRequest struct {
|
||||
Token string `json:"token"`
|
||||
FileCodes string `json:"file_codes"`
|
||||
}
|
||||
|
||||
// Token represents the authentication token
|
||||
type Token struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -1,21 +0,0 @@
|
||||
// Test Uptobox filesystem interface
|
||||
package uptobox_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/uptobox"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
*fstest.RemoteName = "TestUptobox:"
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
NilObject: (*uptobox.Object)(nil),
|
||||
})
|
||||
}
|
||||
@@ -817,7 +817,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
|
||||
params.Set("filename", url.QueryEscape(name))
|
||||
params.Set("parent_id", parent)
|
||||
params.Set("override-name-exist", strconv.FormatBool(true))
|
||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
|
||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name, "application/octet-stream")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make multipart upload: %w", err)
|
||||
}
|
||||
|
||||
@@ -43,9 +43,11 @@ docs = [
|
||||
"compress.md",
|
||||
"combine.md",
|
||||
"doi.md",
|
||||
"drime.md"
|
||||
"dropbox.md",
|
||||
"filefabric.md",
|
||||
"filelu.md",
|
||||
"filen.md",
|
||||
"filescom.md",
|
||||
"ftp.md",
|
||||
"gofile.md",
|
||||
@@ -84,11 +86,11 @@ docs = [
|
||||
"protondrive.md",
|
||||
"seafile.md",
|
||||
"sftp.md",
|
||||
"shade.md",
|
||||
"smb.md",
|
||||
"storj.md",
|
||||
"sugarsync.md",
|
||||
"ulozto.md",
|
||||
"uptobox.md",
|
||||
"union.md",
|
||||
"webdav.md",
|
||||
"yandex.md",
|
||||
|
||||
300
bin/manage_backends.py
Executable file
@@ -0,0 +1,300 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Manage the backend yaml files in docs/data/backends
|
||||
|
||||
usage: manage_backends.py [-h] {create,features,update,help} [files ...]
|
||||
|
||||
Manage rclone backend YAML files.
|
||||
|
||||
positional arguments:
|
||||
{create,features,update,help}
|
||||
Action to perform
|
||||
files List of YAML files to operate on
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
"""
|
||||
import argparse
|
||||
import sys
|
||||
import os
|
||||
import yaml
|
||||
import json
|
||||
import subprocess
|
||||
import time
|
||||
import socket
|
||||
from contextlib import contextmanager
|
||||
from pprint import pprint
|
||||
|
||||
# --- Configuration ---
|
||||
|
||||
# The order in which keys should appear in the YAML file
|
||||
CANONICAL_ORDER = [
|
||||
"backend",
|
||||
"name",
|
||||
"tier",
|
||||
"maintainers",
|
||||
"features_score",
|
||||
"integration_tests",
|
||||
"data_integrity",
|
||||
"performance",
|
||||
"adoption",
|
||||
"docs",
|
||||
"security",
|
||||
"virtual",
|
||||
"remote",
|
||||
"features",
|
||||
"hashes",
|
||||
"precision"
|
||||
]
|
||||
|
||||
# Default values for fields when creating/updating
|
||||
DEFAULTS = {
|
||||
"tier": None,
|
||||
"maintainers": None,
|
||||
"features_score": None,
|
||||
"integration_tests": None,
|
||||
"data_integrity": None,
|
||||
"performance": None,
|
||||
"adoption": None,
|
||||
"docs": None,
|
||||
"security": None,
|
||||
"virtual": False,
|
||||
"remote": None,
|
||||
"features": [],
|
||||
"hashes": [],
|
||||
"precision": None
|
||||
}
|
||||
|
||||
# --- Test server management ---
|
||||
|
||||
def wait_for_tcp(address_str, delay=1, timeout=2, tries=60):
|
||||
"""
|
||||
Blocks until the specified TCP address (e.g., '172.17.0.3:21') is reachable.
|
||||
"""
|
||||
host, port = address_str.split(":")
|
||||
port = int(port)
|
||||
print(f"Waiting for {host}:{port}...")
|
||||
for tri in range(tries):
|
||||
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
|
||||
sock.settimeout(timeout)
|
||||
result = sock.connect_ex((host, port))
|
||||
if result == 0:
|
||||
print(f"Connected to {host}:{port} successfully!")
|
||||
break
|
||||
else:
|
||||
print(f"Failed to connect to {host}:{port} try {tri} !")
|
||||
time.sleep(delay)
|
||||
|
||||
def parse_init_output(binary_input):
|
||||
"""
|
||||
Parse the output of the init script
|
||||
"""
|
||||
decoded_str = binary_input.decode('utf-8')
|
||||
result = {}
|
||||
for line in decoded_str.splitlines():
|
||||
if '=' in line:
|
||||
key, value = line.split('=', 1)
|
||||
result[key.strip()] = value.strip()
|
||||
return result
|
||||
|
||||
@contextmanager
|
||||
def test_server(remote):
|
||||
"""Start the test server for remote if needed"""
|
||||
remote_name = remote.split(":",1)[0]
|
||||
init_script = "fstest/testserver/init.d/" + remote_name
|
||||
if not os.path.isfile(init_script):
|
||||
yield
|
||||
return
|
||||
print(f"--- Starting {init_script} ---")
|
||||
out = subprocess.check_output([init_script, "start"])
|
||||
out = parse_init_output(out)
|
||||
pprint(out)
|
||||
# Configure the server with environment variables
|
||||
env_keys = []
|
||||
for key, value in out.items():
|
||||
env_key = f"RCLONE_CONFIG_{remote_name.upper()}_{key.upper()}"
|
||||
env_keys.append(env_key)
|
||||
os.environ[env_key] = value
|
||||
for key,var in os.environ.items():
|
||||
if key.startswith("RCLON"):
|
||||
print(key, var)
|
||||
if "_connect" in out:
|
||||
wait_for_tcp(out["_connect"])
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
print(f"--- Stopping {init_script} ---")
|
||||
subprocess.run([init_script, "stop"], check=True)
|
||||
# Remove the env vars
|
||||
for env_key in env_keys:
|
||||
del os.environ[env_key]
|
||||
|
||||
# --- Helper Functions ---
|
||||
|
||||
def load_yaml(filepath):
|
||||
if not os.path.exists(filepath):
|
||||
return {}
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
return yaml.safe_load(f) or {}
|
||||
|
||||
def save_yaml(filepath, data):
|
||||
# Reconstruct dictionary in canonical order
|
||||
ordered_data = {}
|
||||
|
||||
# Add known keys in order
|
||||
for key in CANONICAL_ORDER:
|
||||
if key in data:
|
||||
ordered_data[key] = data[key]
|
||||
|
||||
# Add any other keys that might exist (custom fields)
|
||||
for key in data:
|
||||
if key not in CANONICAL_ORDER:
|
||||
ordered_data[key] = data[key]
|
||||
|
||||
# Ensure features are a sorted list (if present)
|
||||
if 'features' in ordered_data and isinstance(ordered_data['features'], list):
|
||||
ordered_data['features'].sort()
|
||||
|
||||
with open(filepath, 'w', encoding='utf-8') as f:
|
||||
yaml.dump(ordered_data, f, default_flow_style=False, sort_keys=False, allow_unicode=True)
|
||||
print(f"Saved {filepath}")
|
||||
|
||||
def get_backend_name_from_file(filepath):
|
||||
"""
|
||||
s3.yaml -> S3
|
||||
azureblob.yaml -> Azureblob
|
||||
"""
|
||||
basename = os.path.basename(filepath)
|
||||
name, _ = os.path.splitext(basename)
|
||||
return name.title()
|
||||
|
||||
def fetch_rclone_features(remote_str):
|
||||
"""
|
||||
Runs `rclone backend features remote:` and returns the JSON object.
|
||||
"""
|
||||
cmd = ["rclone", "backend", "features", remote_str]
|
||||
try:
|
||||
with test_server(remote_str):
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
|
||||
return json.loads(result.stdout)
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error running rclone: {e.stderr}")
|
||||
return None
|
||||
except FileNotFoundError:
|
||||
print("Error: 'rclone' command not found in PATH.")
|
||||
sys.exit(1)
|
||||
|
||||
# --- Verbs ---
|
||||
|
||||
def do_create(files):
|
||||
for filepath in files:
|
||||
if os.path.exists(filepath):
|
||||
print(f"Skipping {filepath} (already exists)")
|
||||
continue
|
||||
|
||||
data = DEFAULTS.copy()
|
||||
# Set a default name based on filename
|
||||
data['name'] = get_backend_name_from_file(filepath)
|
||||
save_yaml(filepath, data)
|
||||
|
||||
def do_update(files):
|
||||
for filepath in files:
|
||||
if not os.path.exists(filepath):
|
||||
print(f"Warning: {filepath} does not exist. Use 'create' first.")
|
||||
continue
|
||||
|
||||
data = load_yaml(filepath)
|
||||
modified = False
|
||||
|
||||
# Inject the filename as the 'backend'
|
||||
file_backend = os.path.splitext(os.path.basename(filepath))[0]
|
||||
|
||||
if data.get('backend') != file_backend:
|
||||
data['backend'] = file_backend
|
||||
modified = True
|
||||
print(f"[{filepath}] Updated backend to: {file_backend}")
|
||||
|
||||
# Add missing default fields
|
||||
for key, default_val in DEFAULTS.items():
|
||||
if key not in data:
|
||||
data[key] = default_val
|
||||
modified = True
|
||||
print(f"[{filepath}] Added missing field: {key}")
|
||||
|
||||
# Special handling for 'name' if it was just added as None or didn't exist
|
||||
if data.get('name') is None:
|
||||
data['name'] = get_backend_name_from_file(filepath)
|
||||
modified = True
|
||||
print(f"[{filepath}] Set default name: {data['name']}")
|
||||
|
||||
if modified:
|
||||
save_yaml(filepath, data)
|
||||
else:
|
||||
# We save anyway to enforce canonical order if the file was messy
|
||||
save_yaml(filepath, data)
|
||||
|
||||
def do_features(files):
|
||||
for filepath in files:
|
||||
if not os.path.exists(filepath):
|
||||
print(f"Error: {filepath} not found.")
|
||||
continue
|
||||
|
||||
data = load_yaml(filepath)
|
||||
remote = data.get('remote')
|
||||
|
||||
if not remote:
|
||||
print(f"Error: [{filepath}] 'remote' field is missing or empty. Cannot fetch features.")
|
||||
continue
|
||||
|
||||
print(f"[{filepath}] Fetching features for remote: '{remote}'...")
|
||||
rclone_data = fetch_rclone_features(remote)
|
||||
|
||||
if not rclone_data:
|
||||
print(f"Failed to fetch data for {filepath}")
|
||||
continue
|
||||
|
||||
# Process Features (Dict -> Sorted List of True keys)
|
||||
features_dict = rclone_data.get('Features', {})
|
||||
# Filter only true values and sort keys
|
||||
feature_list = sorted([k for k, v in features_dict.items() if v])
|
||||
|
||||
# Process Hashes
|
||||
hashes_list = rclone_data.get('Hashes', [])
|
||||
|
||||
# Process Precision
|
||||
precision = rclone_data.get('Precision')
|
||||
|
||||
# Update data
|
||||
data['features'] = feature_list
|
||||
data['hashes'] = hashes_list
|
||||
data['precision'] = precision
|
||||
|
||||
save_yaml(filepath, data)
|
||||
|
||||
# --- Main CLI ---
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description="Manage rclone backend YAML files.")
|
||||
parser.add_argument("verb", choices=["create", "features", "update", "help"], help="Action to perform")
|
||||
parser.add_argument("files", nargs="*", help="List of YAML files to operate on")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.verb == "help":
|
||||
parser.print_help()
|
||||
sys.exit(0)
|
||||
|
||||
if not args.files:
|
||||
print("Error: No files specified.")
|
||||
parser.print_help()
|
||||
sys.exit(1)
|
||||
|
||||
if args.verb == "create":
|
||||
do_create(args.files)
|
||||
elif args.verb == "update":
|
||||
do_update(args.files)
|
||||
elif args.verb == "features":
|
||||
do_features(args.files)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
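The verbs above map directly onto the workflow in the module docstring. A minimal usage sketch, assuming the script is run from the repository root and that the backend YAML files live in `docs/data/backends/` as stated there (the specific file names are illustrative):

```console
# create skeleton YAML files for new backends (existing files are skipped)
./bin/manage_backends.py create docs/data/backends/shade.yaml

# normalise existing files: set the backend key from the file name,
# add any missing default fields and rewrite keys in canonical order
./bin/manage_backends.py update docs/data/backends/*.yaml

# refresh features, hashes and precision by running
# `rclone backend features <remote>` against the remote named in each file
./bin/manage_backends.py features docs/data/backends/s3.yaml
```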
|
||||
@@ -389,8 +389,8 @@ func parseHash(str string) (string, string, error) {
|
||||
if str == "-" {
|
||||
return "", "", nil
|
||||
}
|
||||
if pos := strings.Index(str, ":"); pos > 0 {
|
||||
name, val := str[:pos], str[pos+1:]
|
||||
if before, after, ok := strings.Cut(str, ":"); ok {
|
||||
name, val := before, after
|
||||
if name != "" && val != "" {
|
||||
return name, val, nil
|
||||
}
|
||||
|
||||
@@ -341,7 +341,7 @@ func (h *testState) preconfigureServer() {
|
||||
// The `\\?\` prefix tells Windows APIs to pass strings unmodified to the
|
||||
// filesystem without additional parsing [1]. Our workaround is roughly to add
|
||||
// the prefix to whichever parameter doesn't have it (when the OS is Windows).
|
||||
// I'm not sure this generalizes, but it works for the the kinds of inputs we're
|
||||
// I'm not sure this generalizes, but it works for the kinds of inputs we're
|
||||
// throwing at it.
|
||||
//
|
||||
// [1]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
|
||||
|
||||
@@ -26,6 +26,10 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re
|
||||
The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
|
||||
use |-R| to make them recurse.
|
||||
|
||||
List commands prefer a recursive method that uses more memory but fewer
|
||||
transactions by default. Use |--disable ListR| to suppress the behavior.
|
||||
See [|--fast-list|](/docs/#fast-list) for more details.
|
||||
|
||||
Listing a nonexistent directory will produce an error except for
|
||||
remotes which can't have empty directories (e.g. s3, swift, or gcs -
|
||||
the bucket-based remotes).`, "|", "`")
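To make the listing behaviour above concrete, here is a hedged set of examples using only the flags mentioned in the text (the remote path is a placeholder):

```console
# ls recurses by default - limit it to the top level
rclone ls remote:path --max-depth 1

# lsd does not recurse by default - ask it to
rclone lsd -R remote:path

# list directory-by-directory instead of using the recursive ListR method
rclone ls --disable ListR remote:path
```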
|
||||
|
||||
@@ -97,7 +97,7 @@ with the following options:
|
||||
- If ` + "`--files-only`" + ` is specified then files will be returned only,
|
||||
no directories.
|
||||
|
||||
If ` + "`--stat`" + ` is set then the the output is not an array of items,
|
||||
If ` + "`--stat`" + ` is set then the output is not an array of items,
|
||||
but instead a single JSON blob will be returned about the item pointed to.
|
||||
This will return an error if the item isn't found, however on bucket based
|
||||
backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
|
||||
|
||||
@@ -71,7 +71,7 @@ rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=m
|
||||
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
|
||||
` + "```" + `
|
||||
|
||||
The vfsOpt are as described in options/get and can be seen in the the
|
||||
The vfsOpt are as described in options/get and can be seen in the
|
||||
"vfs" section when running and the mountOpt can be seen in the "mount" section:
|
||||
|
||||
` + "```console" + `
|
||||
|
||||
@@ -34,7 +34,7 @@ argument by passing a hyphen as an argument. This will use the first
|
||||
line of STDIN as the password not including the trailing newline.
|
||||
|
||||
` + "```console" + `
|
||||
echo "secretpassword" | rclone obscure -
|
||||
echo 'secretpassword' | rclone obscure -
|
||||
` + "```" + `
|
||||
|
||||
If there is no data on STDIN to read, rclone obscure will default to
|
||||
|
||||
@@ -153,7 +153,7 @@ func TestRun(t *testing.T) {
|
||||
fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error())
|
||||
}
|
||||
publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey)
|
||||
if privateKeyErr != nil {
|
||||
if publicKeyError != nil {
|
||||
fs.Fatal(nil, "error generating test public key "+publicKeyError.Error())
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,26 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
|
||||
`--auth-key` is not provided then `serve s3` will allow anonymous
|
||||
access.
|
||||
|
||||
Like all rclone flags `--auth-key` can be set via environment
|
||||
variables, in this case `RCLONE_AUTH_KEY`. Since this flag can be
|
||||
repeated, the input to `RCLONE_AUTH_KEY` is CSV encoded. Because the
|
||||
`accessKey,secretKey` has a comma in, this means it needs to be in
|
||||
quotes.
|
||||
|
||||
```console
|
||||
export RCLONE_AUTH_KEY='"user,pass"'
|
||||
rclone serve s3 ...
|
||||
```
|
||||
|
||||
Or to supply multiple identities:
|
||||
|
||||
```console
|
||||
export RCLONE_AUTH_KEY='"user1,pass1","user2,pass2"'
|
||||
rclone serve s3 ...
|
||||
```
|
||||
|
||||
Setting this variable without quotes will produce an error.
|
||||
|
||||
Please note that some clients may require HTTPS endpoints. See [the
|
||||
SSL docs](#tls-ssl) for more information.
|
||||
|
||||
|
||||
@@ -70,6 +70,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
|
||||
w.s3Secret = getAuthSecret(opt.AuthKey)
|
||||
}
|
||||
|
||||
authList, err := authlistResolver(opt.AuthKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing auth list failed: %q", err)
|
||||
}
|
||||
|
||||
var newLogger logger
|
||||
w.faker = gofakes3.New(
|
||||
newBackend(w),
|
||||
@@ -77,7 +82,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
|
||||
gofakes3.WithLogger(newLogger),
|
||||
gofakes3.WithRequestID(rand.Uint64()),
|
||||
gofakes3.WithoutVersioning(),
|
||||
gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
|
||||
gofakes3.WithV4Auth(authList),
|
||||
gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
|
||||
)
|
||||
|
||||
@@ -92,7 +97,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
|
||||
w._vfs = vfs.New(f, vfsOpt)
|
||||
|
||||
if len(opt.AuthKey) > 0 {
|
||||
w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
|
||||
w.faker.AddAuthKeys(authList)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package s3
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
@@ -125,15 +126,14 @@ func rmdirRecursive(p string, VFS *vfs.VFS) {
|
||||
}
|
||||
}
|
||||
|
||||
func authlistResolver(list []string) map[string]string {
|
||||
func authlistResolver(list []string) (map[string]string, error) {
|
||||
authList := make(map[string]string)
|
||||
for _, v := range list {
|
||||
parts := strings.Split(v, ",")
|
||||
if len(parts) != 2 {
|
||||
fs.Infof(nil, "Ignored: invalid auth pair %s", v)
|
||||
continue
|
||||
return nil, errors.New("invalid auth pair: expecting a single comma")
|
||||
}
|
||||
authList[parts[0]] = parts[1]
|
||||
}
|
||||
return authList
|
||||
return authList, nil
|
||||
}
|
||||
|
||||
@@ -58,10 +58,10 @@ type conn struct {
|
||||
// interoperate with the rclone sftp backend
|
||||
func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) {
|
||||
binary, args := command, ""
|
||||
space := strings.Index(command, " ")
|
||||
if space >= 0 {
|
||||
binary = command[:space]
|
||||
args = strings.TrimLeft(command[space+1:], " ")
|
||||
before, after, ok := strings.Cut(command, " ")
|
||||
if ok {
|
||||
binary = before
|
||||
args = strings.TrimLeft(after, " ")
|
||||
}
|
||||
args = shellUnEscape(args)
|
||||
fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)
|
||||
@@ -291,7 +291,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
|
||||
}
|
||||
}
|
||||
fs.Debugf(c.what, " - accepted: %v\n", ok)
|
||||
err = req.Reply(ok, reply)
|
||||
err := req.Reply(ok, reply)
|
||||
if err != nil {
|
||||
fs.Errorf(c.what, "Failed to Reply to request: %v", err)
|
||||
return
|
||||
|
||||
@@ -45,6 +45,10 @@ var OptionsInfo = fs.Options{{
|
||||
Name: "disable_dir_list",
|
||||
Default: false,
|
||||
Help: "Disable HTML directory list on GET request for a directory",
|
||||
}, {
|
||||
Name: "disable_zip",
|
||||
Default: false,
|
||||
Help: "Disable zip download of directories",
|
||||
}}.
|
||||
Add(libhttp.ConfigInfo).
|
||||
Add(libhttp.AuthConfigInfo).
|
||||
@@ -57,6 +61,7 @@ type Options struct {
|
||||
Template libhttp.TemplateConfig
|
||||
EtagHash string `config:"etag_hash"`
|
||||
DisableDirList bool `config:"disable_dir_list"`
|
||||
DisableZip bool `config:"disable_zip"`
|
||||
}
|
||||
|
||||
// Opt is options set by command line flags
|
||||
@@ -408,6 +413,24 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
|
||||
return
|
||||
}
|
||||
dir := node.(*vfs.Dir)
|
||||
|
||||
if r.URL.Query().Get("download") == "zip" && !w.opt.DisableZip {
|
||||
fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr)
|
||||
zipName := path.Base(dirRemote)
|
||||
if dirRemote == "" {
|
||||
zipName = "root"
|
||||
}
|
||||
rw.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"")
|
||||
rw.Header().Set("Content-Type", "application/zip")
|
||||
rw.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
|
||||
err := vfs.CreateZip(ctx, dir, rw)
|
||||
if err != nil {
|
||||
serve.Error(ctx, dirRemote, rw, "Failed to create zip", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
dirEntries, err := dir.ReadDirAll()
|
||||
|
||||
if err != nil {
|
||||
@@ -417,6 +440,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
|
||||
|
||||
// Make the entries for display
|
||||
directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate())
|
||||
directory.DisableZip = w.opt.DisableZip
|
||||
for _, node := range dirEntries {
|
||||
if vfscommon.Opt.NoModTime {
|
||||
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
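The branch added above answers a `download=zip` query on a directory URL with a zip of that directory (unless zip downloads are disabled). A hedged illustration of exercising it, with the listen address and directory name purely as placeholders:

```console
rclone serve webdav remote:path --addr :8080

# fetch the "photos" directory as photos.zip, letting curl use the
# server's Content-Disposition header for the output file name
curl -OJ "http://localhost:8080/photos/?download=zip"
```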
|
||||
|
||||
@@ -56,22 +56,22 @@ var speedCmd = &cobra.Command{
|
||||
Short: `Run a speed test to the remote`,
|
||||
Long: `Run a speed test to the remote.
|
||||
|
||||
This command runs a series of uploads and downloads to the remote, measuring
|
||||
and printing the speed of each test using varying file sizes and numbers of
|
||||
files.
|
||||
This command runs a series of uploads and downloads to the remote, measuring
|
||||
and printing the speed of each test using varying file sizes and numbers of
|
||||
files.
|
||||
|
||||
Test time can be inaccurate with small file caps and large files, as it
|
||||
uses the results of an initial test to determine how many files to use in
|
||||
each subsequent test.
|
||||
Test time can be inaccurate with small file caps and large files, as it
|
||||
uses the results of an initial test to determine how many files to use in
|
||||
each subsequent test.
|
||||
|
||||
It is recommended to use -q flag for a simpler output. e.g.:
|
||||
|
||||
rlone test speed remote: -q
|
||||
It is recommended to use -q flag for a simpler output. e.g.:
|
||||
|
||||
**NB** This command will create and delete files on the remote in a randomly
|
||||
named directory which should be tidied up after.
|
||||
rclone test speed remote: -q
|
||||
|
||||
You can use the --json flag to only print the results in JSON format.`,
|
||||
**NB** This command will create and delete files on the remote in a randomly
|
||||
named directory which will be automatically removed on a clean exit.
|
||||
|
||||
You can use the --json flag to only print the results in JSON format.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.72",
|
||||
},
|
||||
|
||||
@@ -116,6 +116,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}}
|
||||
{{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
|
||||
{{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
|
||||
{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}}
|
||||
{{< provider name="Backblaze B2" home="https://www.backblaze.com/cloud-storage" config="/b2/" >}}
|
||||
{{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
|
||||
{{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
|
||||
@@ -128,12 +129,14 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
|
||||
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
|
||||
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
|
||||
{{< provider name="Drime" home="https://www.drime.cloud/" config="/drime/" >}}
|
||||
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
|
||||
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
|
||||
{{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}
|
||||
{{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}}
|
||||
{{< provider name="FileLu Cloud Storage" home="https://filelu.com/" config="/filelu/" >}}
|
||||
{{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}}
|
||||
{{< provider name="Filen" home="https://www.filen.io/" config="/filen/" >}}
|
||||
{{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}}
|
||||
{{< provider name="FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
|
||||
{{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
|
||||
@@ -202,6 +205,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
|
||||
{{< provider name="Servercore Object Storage" home="https://servercore.com/services/object-storage/" config="/s3/#servercore" >}}
|
||||
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
|
||||
{{< provider name="Shade" home="https://shade.inc" config="/shade/" >}}
|
||||
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
|
||||
{{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
|
||||
{{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}}
|
||||
@@ -211,7 +215,6 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
|
||||
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
|
||||
{{< provider name="Uloz.to" home="https://uloz.to" config="/ulozto/" >}}
|
||||
{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
|
||||
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
|
||||
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
|
||||
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
|
||||
|
||||
@@ -237,7 +237,6 @@ It would be possible to add ISO support fairly easily as the library we use ([go
|
||||
It would be possible to add write support, but this would only be for creating new archives, not for updating existing archives.
|
||||
|
||||
<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/archive/archive.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
|
||||
|
||||
### Standard options
|
||||
|
||||
Here are the Standard options specific to archive (Read archives).
|
||||
|
||||
@@ -1047,3 +1047,26 @@ put them back in again. -->
|
||||
- Sean Turner <30396892+seanturner026@users.noreply.github.com>
|
||||
- jijamik <30904953+jijamik@users.noreply.github.com>
|
||||
- Dominik Sander <git@dsander.de>
|
||||
- Nikolay Kiryanov <nikolay@kiryanov.ru>
|
||||
- Diana <5275194+DianaNites@users.noreply.github.com>
|
||||
- Duncan Smart <duncan.smart@gmail.com>
|
||||
- vicerace <vicerace@sohu.com>
|
||||
- Cliff Frey <cliff@openai.com>
|
||||
- Vladislav Tropnikov <vtr.name@gmail.com>
|
||||
- Leo <i@hardrain980.com>
|
||||
- Johannes Rothe <mail@johannes-rothe.de>
|
||||
- Tingsong Xu <tingsong.xu@rightcapital.com>
|
||||
- Jonas Tingeborn <134889+jojje@users.noreply.github.com>
|
||||
- jhasse-shade <jacob@shade.inc>
|
||||
- vyv03354 <VYV03354@nifty.ne.jp>
|
||||
- masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>
|
||||
- vupn0712 <126212736+vupn0712@users.noreply.github.com>
|
||||
- darkdragon-001 <darkdragon-001@users.noreply.github.com>
|
||||
- sys6101 <csvmen@gmail.com>
|
||||
- Nicolas Dessart <nds@outsight.tech>
|
||||
- Qingwei Li <332664203@qq.com>
|
||||
- yy <yhymmt37@gmail.com>
|
||||
- Marc-Philip <marc-philip.werner@sap.com>
|
||||
- Mikel Olasagasti Uranga <mikel@olasagasti.info>
|
||||
- Nick Owens <mischief@offblast.org>
|
||||
- hyusap <paulayush@gmail.com>
|
||||
|
||||
@@ -103,6 +103,26 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in
|
||||
chunks only have an MD5 if the source remote was capable of MD5
|
||||
hashes, e.g. the local disk.
|
||||
|
||||
### Metadata and tags
|
||||
|
||||
Rclone can map arbitrary metadata to Azure Blob headers, user metadata, and tags
|
||||
when `--metadata` is enabled (or when using `--metadata-set` / `--metadata-mapper`).
|
||||
|
||||
- Headers: Set these keys in metadata to map to the corresponding blob headers:
|
||||
- `cache-control`, `content-disposition`, `content-encoding`, `content-language`, `content-type`.
|
||||
- User metadata: Any other non-reserved keys are written as user metadata
|
||||
(keys are normalized to lowercase). Keys starting with `x-ms-` are reserved and
|
||||
are not stored as user metadata.
|
||||
- Tags: Provide `x-ms-tags` as a comma-separated list of `key=value` pairs, e.g.
|
||||
`x-ms-tags=env=dev,team=sync`. These are applied as blob tags on upload and on
|
||||
server-side copies. Whitespace around keys/values is ignored.
|
||||
- Modtime override: Provide `mtime` in RFC3339/RFC3339Nano format to override the
|
||||
stored modtime persisted in user metadata. If `mtime` cannot be parsed, rclone
|
||||
logs a debug message and ignores the override.
|
||||
|
||||
Notes:
|
||||
- Rclone ignores reserved `x-ms-*` keys (except `x-ms-tags`) for user metadata.
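A hedged example of driving these mappings from the command line (the remote name, file and values are illustrative; `--metadata-set` injects metadata for the upload):

```console
rclone copyto report.pdf azblob:container/report.pdf \
  --metadata \
  --metadata-set "content-type=application/pdf" \
  --metadata-set "x-ms-tags=env=dev,team=sync" \
  --metadata-set "mtime=2024-05-01T12:00:00Z"
```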
|
||||
|
||||
### Performance
|
||||
|
||||
When uploading large files, increasing the value of
|
||||
@@ -959,13 +979,13 @@ Properties:
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- ""
|
||||
- The container and its blobs can be accessed only with an authorized request.
|
||||
- It's a default value.
|
||||
- "blob"
|
||||
- Blob data within this container can be read via anonymous request.
|
||||
- "container"
|
||||
- Allow full public read access for container and blob data.
|
||||
- ""
|
||||
- The container and its blobs can be accessed only with an authorized request.
|
||||
- It's a default value.
|
||||
- "blob"
|
||||
- Blob data within this container can be read via anonymous request.
|
||||
- "container"
|
||||
- Allow full public read access for container and blob data.
|
||||
|
||||
#### --azureblob-directory-markers
|
||||
|
||||
@@ -1022,12 +1042,12 @@ Properties:
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Choices:
|
||||
- ""
|
||||
- By default, the delete operation fails if a blob has snapshots
|
||||
- "include"
|
||||
- Specify 'include' to remove the root blob and all its snapshots
|
||||
- "only"
|
||||
- Specify 'only' to remove only the snapshots but keep the root blob.
|
||||
- ""
|
||||
- By default, the delete operation fails if a blob has snapshots
|
||||
- "include"
|
||||
- Specify 'include' to remove the root blob and all its snapshots
|
||||
- "only"
|
||||
- Specify 'only' to remove only the snapshots but keep the root blob.
|
||||
|
||||
#### --azureblob-description
|
||||
|
||||
|
||||
@@ -283,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scena
|
||||
All copy commands send the following 4 requests:
|
||||
|
||||
```text
|
||||
/b2api/v1/b2_authorize_account
|
||||
/b2api/v4/b2_authorize_account
|
||||
/b2api/v1/b2_create_bucket
|
||||
/b2api/v1/b2_list_buckets
|
||||
/b2api/v1/b2_list_file_names
|
||||
@@ -667,6 +667,71 @@ Properties:
|
||||
- Type: Encoding
|
||||
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
|
||||
|
||||
#### --b2-sse-customer-algorithm
|
||||
|
||||
If using SSE-C, the server-side encryption algorithm used when storing this object in B2.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: sse_customer_algorithm
|
||||
- Env Var: RCLONE_B2_SSE_CUSTOMER_ALGORITHM
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- ""
|
||||
- None
|
||||
- "AES256"
|
||||
- Advanced Encryption Standard (256 bits key length)
|
||||
|
||||
#### --b2-sse-customer-key
|
||||
|
||||
To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
|
||||
|
||||
Alternatively you can provide --sse-customer-key-base64.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: sse_customer_key
|
||||
- Env Var: RCLONE_B2_SSE_CUSTOMER_KEY
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- ""
|
||||
- None
|
||||
|
||||
#### --b2-sse-customer-key-base64
|
||||
|
||||
To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
|
||||
|
||||
Alternatively you can provide --sse-customer-key.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: sse_customer_key_base64
|
||||
- Env Var: RCLONE_B2_SSE_CUSTOMER_KEY_BASE64
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- ""
|
||||
- None
|
||||
|
||||
#### --b2-sse-customer-key-md5
|
||||
|
||||
If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
|
||||
|
||||
If you leave it blank, this is calculated automatically from the sse_customer_key provided.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: sse_customer_key_md5
|
||||
- Env Var: RCLONE_B2_SSE_CUSTOMER_KEY_MD5
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- ""
|
||||
- None
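Taken together, the SSE-C options above can be supplied per transfer. A hedged sketch (bucket, path and key are illustrative; AES256 implies a 32-byte key, generated here with openssl, and the key MD5 is derived automatically when left blank):

```console
SSEC_KEY="$(openssl rand -base64 32)"

rclone copy secret.txt b2:mybucket/path \
  --b2-sse-customer-algorithm AES256 \
  --b2-sse-customer-key-base64 "$SSEC_KEY"
```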
|
||||
|
||||
#### --b2-description
|
||||
|
||||
Description of the remote.
|
||||
@@ -682,9 +747,11 @@ Properties:
|
||||
|
||||
Here are the commands specific to the b2 backend.
|
||||
|
||||
Run them with
|
||||
Run them with:
|
||||
|
||||
rclone backend COMMAND remote:
|
||||
```console
|
||||
rclone backend COMMAND remote:
|
||||
```
|
||||
|
||||
The help below will explain what arguments each command takes.
|
||||
|
||||
@@ -696,35 +763,41 @@ These can be run on a running backend using the rc command
|
||||
|
||||
### lifecycle
|
||||
|
||||
Read or set the lifecycle for a bucket
|
||||
Read or set the lifecycle for a bucket.
|
||||
|
||||
rclone backend lifecycle remote: [options] [<arguments>+]
|
||||
```console
|
||||
rclone backend lifecycle remote: [options] [<arguments>+]
|
||||
```
|
||||
|
||||
This command can be used to read or set the lifecycle for a bucket.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
To show the current lifecycle rules:
|
||||
|
||||
rclone backend lifecycle b2:bucket
|
||||
```console
|
||||
rclone backend lifecycle b2:bucket
|
||||
```
|
||||
|
||||
This will dump something like this showing the lifecycle rules.
|
||||
|
||||
[
|
||||
{
|
||||
"daysFromHidingToDeleting": 1,
|
||||
"daysFromUploadingToHiding": null,
|
||||
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
|
||||
"fileNamePrefix": ""
|
||||
}
|
||||
]
|
||||
```json
|
||||
[
|
||||
{
|
||||
"daysFromHidingToDeleting": 1,
|
||||
"daysFromUploadingToHiding": null,
|
||||
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
|
||||
"fileNamePrefix": ""
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
If there are no lifecycle rules (the default) then it will just return [].
|
||||
If there are no lifecycle rules (the default) then it will just return `[]`.
|
||||
|
||||
To reset the current lifecycle rules:
|
||||
|
||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
||||
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
||||
```console
|
||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
|
||||
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
|
||||
```
|
||||
|
||||
This will run and then print the new lifecycle rules as above.
|
||||
|
||||
@@ -736,22 +809,27 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
|
||||
the config also which will mean deletions won't cause versions but
|
||||
overwrites will still cause versions to be made.
|
||||
|
||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
||||
|
||||
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
|
||||
```console
|
||||
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
|
||||
```
|
||||
|
||||
See: <https://www.backblaze.com/docs/cloud-storage-lifecycle-rules>
|
||||
|
||||
Options:
|
||||
|
||||
- "daysFromHidingToDeleting": After a file has been hidden for this many days it is deleted. 0 is off.
|
||||
- "daysFromStartingToCancelingUnfinishedLargeFiles": Cancels any unfinished large file versions after this many days
|
||||
- "daysFromUploadingToHiding": This many days after uploading a file is hidden
|
||||
- "daysFromHidingToDeleting": After a file has been hidden for this many days
|
||||
it is deleted. 0 is off.
|
||||
- "daysFromStartingToCancelingUnfinishedLargeFiles": Cancels any unfinished
|
||||
large file versions after this many days.
|
||||
- "daysFromUploadingToHiding": This many days after uploading a file is hidden.
|
||||
|
||||
### cleanup
|
||||
|
||||
Remove unfinished large file uploads.
|
||||
|
||||
rclone backend cleanup remote: [options] [<arguments>+]
|
||||
```console
|
||||
rclone backend cleanup remote: [options] [<arguments>+]
|
||||
```
|
||||
|
||||
This command removes unfinished large file uploads of age greater than
|
||||
max-age, which defaults to 24 hours.
|
||||
@@ -759,29 +837,33 @@ max-age, which defaults to 24 hours.
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
|
||||
rclone backend cleanup b2:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
||||
```console
|
||||
rclone backend cleanup b2:bucket/path/to/object
|
||||
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
|
||||
```
|
||||
|
||||
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
|
||||
|
||||
Options:
|
||||
|
||||
- "max-age": Max age of upload to delete
|
||||
- "max-age": Max age of upload to delete.
|
||||
|
||||
### cleanup-hidden
|
||||
|
||||
Remove old versions of files.
|
||||
|
||||
rclone backend cleanup-hidden remote: [options] [<arguments>+]
|
||||
```console
|
||||
rclone backend cleanup-hidden remote: [options] [<arguments>+]
|
||||
```
|
||||
|
||||
This command removes any old hidden versions of files.
|
||||
|
||||
Note that you can use --interactive/-i or --dry-run with this command to see what
|
||||
it would do.
|
||||
|
||||
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
||||
|
||||
```console
|
||||
rclone backend cleanup-hidden b2:bucket/path/to/dir
|
||||
```
|
||||
|
||||
<!-- autogenerated options stop -->
|
||||
|
||||
|
||||
@@ -1047,20 +1047,16 @@ encodings.)
|
||||
The following backends have known issues that need more investigation:
|
||||
|
||||
<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->
|
||||
- `TestGoFile` (`gofile`)
|
||||
- [`TestBisyncRemoteLocal/all_changed`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
|
||||
- [`TestBisyncRemoteLocal/backupdir`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
|
||||
- [`TestBisyncRemoteLocal/basic`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
|
||||
- [`TestBisyncRemoteLocal/changes`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
|
||||
- [`TestBisyncRemoteLocal/check_access`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
|
||||
- [78 more](https://pub.rclone.org/integration-tests/current/)
|
||||
- Updated: 2025-08-21-010015
|
||||
- `TestDropbox` (`dropbox`)
|
||||
- [`TestBisyncRemoteRemote/normalization`](https://pub.rclone.org/integration-tests/current/dropbox-cmd.bisync-TestDropbox-1.txt)
|
||||
- Updated: 2025-11-21-010037
|
||||
<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->
|
||||
|
||||
The following backends either have not been tested recently or have known issues
|
||||
that are deemed unfixable for the time being:
|
||||
|
||||
<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->
|
||||
- `TestArchive` (`archive`)
|
||||
- `TestCache` (`cache`)
|
||||
- `TestFileLu` (`filelu`)
|
||||
- `TestFilesCom` (`filescom`)
|
||||
|
||||
@@ -323,6 +323,19 @@ Properties:
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --box-config-credentials
|
||||
|
||||
Box App config.json contents.
|
||||
|
||||
Leave blank normally.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: config_credentials
|
||||
- Env Var: RCLONE_BOX_CONFIG_CREDENTIALS
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --box-access-token
|
||||
|
||||
Box App Primary Access Token
|
||||
@@ -347,10 +360,10 @@ Properties:
|
||||
- Type: string
|
||||
- Default: "user"
|
||||
- Examples:
|
||||
- "user"
|
||||
- Rclone should act on behalf of a user.
|
||||
- "enterprise"
|
||||
- Rclone should act on behalf of a service account.
|
||||
- "user"
|
||||
- Rclone should act on behalf of a user.
|
||||
- "enterprise"
|
||||
- Rclone should act on behalf of a service account.
|
||||
|
||||
### Advanced options
|
||||
|
||||
|
||||
@@ -394,12 +394,12 @@ Properties:
|
||||
- Type: SizeSuffix
|
||||
- Default: 5Mi
|
||||
- Examples:
|
||||
- "1M"
|
||||
- 1 MiB
|
||||
- "5M"
|
||||
- 5 MiB
|
||||
- "10M"
|
||||
- 10 MiB
|
||||
- "1M"
|
||||
- 1 MiB
|
||||
- "5M"
|
||||
- 5 MiB
|
||||
- "10M"
|
||||
- 10 MiB
|
||||
|
||||
#### --cache-info-age
|
||||
|
||||
@@ -414,12 +414,12 @@ Properties:
|
||||
- Type: Duration
|
||||
- Default: 6h0m0s
|
||||
- Examples:
|
||||
- "1h"
|
||||
- 1 hour
|
||||
- "24h"
|
||||
- 24 hours
|
||||
- "48h"
|
||||
- 48 hours
|
||||
- "1h"
|
||||
- 1 hour
|
||||
- "24h"
|
||||
- 24 hours
|
||||
- "48h"
|
||||
- 48 hours
|
||||
|
||||
#### --cache-chunk-total-size
|
||||
|
||||
@@ -435,12 +435,12 @@ Properties:
|
||||
- Type: SizeSuffix
|
||||
- Default: 10Gi
|
||||
- Examples:
|
||||
- "500M"
|
||||
- 500 MiB
|
||||
- "1G"
|
||||
- 1 GiB
|
||||
- "10G"
|
||||
- 10 GiB
|
||||
- "500M"
|
||||
- 500 MiB
|
||||
- "1G"
|
||||
- 1 GiB
|
||||
- "10G"
|
||||
- 10 GiB
|
||||
|
||||
### Advanced options
|
||||
|
||||
@@ -698,9 +698,11 @@ Properties:
|
||||
|
||||
Here are the commands specific to the cache backend.
|
||||
|
||||
Run them with
|
||||
Run them with:
|
||||
|
||||
rclone backend COMMAND remote:
|
||||
```console
|
||||
rclone backend COMMAND remote:
|
||||
```
|
||||
|
||||
The help below will explain what arguments each command takes.
|
||||
|
||||
@@ -714,6 +716,8 @@ These can be run on a running backend using the rc command
|
||||
|
||||
Print stats on the cache backend in JSON format.
|
||||
|
||||
rclone backend stats remote: [options] [<arguments>+]
|
||||
```console
|
||||
rclone backend stats remote: [options] [<arguments>+]
|
||||
```
|
||||
|
||||
<!-- autogenerated options stop -->
|
||||
|
||||
@@ -6,6 +6,146 @@ description: "Rclone Changelog"
|
||||
|
||||
# Changelog
|
||||
|
||||
## v1.72.1 - 2025-12-10
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.72.0...v1.72.1)
|
||||
|
||||
- Bug Fixes
|
||||
- build: update to go1.25.5 to fix [CVE-2025-61729](https://pkg.go.dev/vuln/GO-2025-4155)
|
||||
- doc fixes (Duncan Smart, Nick Craig-Wood)
|
||||
- configfile: Fix piped config support (Jonas Tingeborn)
|
||||
- log
|
||||
- Fix PID not included in JSON log output (Tingsong Xu)
|
||||
- Fix backtrace not going to the --log-file (Nick Craig-Wood)
|
||||
- Google Cloud Storage
|
||||
- Improve endpoint parameter docs (Johannes Rothe)
|
||||
- S3
|
||||
- Add missing regions for Selectel provider (Nick Craig-Wood)
|
||||
|
||||
## v1.72.0 - 2025-11-21
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.72.0)
|
||||
|
||||
- New backends
|
||||
- [Archive](/archive) backend to read archives on cloud storage. (Nick Craig-Wood)
|
||||
- New S3 providers
|
||||
- [Cubbit Object Storage](/s3/#Cubbit) (Marco Ferretti)
|
||||
- [FileLu S5 Object Storage](/s3/#filelu-s5) (kingston125)
|
||||
- [Hetzner Object Storage](/s3/#hetzner) (spiffytech)
|
||||
- [Intercolo Object Storage](/s3/#intercolo) (Robin Rolf)
|
||||
- [Rabata S3-compatible secure cloud storage](/s3/#Rabata) (dougal)
|
||||
- [Servercore Object Storage](/s3/#servercore) (dougal)
|
||||
- [SpectraLogic](/s3/#spectralogic) (dougal)
|
||||
- New commands
|
||||
- [rclone archive](/commands/rclone_archive/): command to create and read archive files (Fawzib Rojas)
|
||||
- [rclone config string](/commands/rclone_config_string/): for making connection strings (Nick Craig-Wood)
|
||||
- [rclone test speed](/commands/rclone_test_speed/): Add command to test a specified remotes speed (dougal)
|
||||
- New Features
|
||||
- backends: many backends have had a paged listing (`ListP`) interface added
|
||||
- this enables progress when listing large directories and reduces memory usage
|
||||
- build
|
||||
- Bump golang.org/x/crypto from 0.43.0 to 0.45.0 to fix CVE-2025-58181 (dependabot[bot])
|
||||
- Modernize code and tests (Nick Craig-Wood, russcoss, juejinyuxitu, reddaisyy, dulanting, Oleksandr Redko)
|
||||
- Update all dependencies (Nick Craig-Wood)
|
||||
- Enable support for `aix/ppc64` (Lakshmi-Surekha)
|
||||
- check: Improved reporting of differences in sizes and contents (albertony)
|
||||
- copyurl: Added `--url` to read URLs from CSV file (S-Pegg1, dougal)
|
||||
- docs:
|
||||
- markdown linting (albertony)
|
||||
- fixes (albertony, Andrew Gunnerson, anon-pradip, Claudius Ellsel, dougal, iTrooz, Jean-Christophe Cura, Joseph Brownlee, kapitainsky, Matt LaPaglia, n4n5, Nick Craig-Wood, nielash, SublimePeace, Ted Robertson, vastonus)
|
||||
- fs: remove unnecessary Seek call on log file (Aneesh Agrawal)
|
||||
- hashsum: Improved output format when listing algorithms (albertony)
|
||||
- lib/http: Cleanup indentation and other whitespace in http serve template (albertony)
|
||||
- lsf: Add support for `unix` and `unixnano` time formats (Motte)
|
||||
- oauthutil: Improved debug logs from token refresh (albertony)
|
||||
- rc
|
||||
- Add [job/batch](/rc/#job-batch) for sending batches of rc commands to run concurrently (Nick Craig-Wood)
|
||||
- Add `runningIds` and `finishedIds` to [job/list](/rc/#job-list) (n4n5)
|
||||
- Add `osVersion`, `osKernel` and `osArch` to [core/version](/rc/#core-version) (Nick Craig-Wood)
|
||||
- Make sure fatal errors run via the rc don't crash rclone (Nick Craig-Wood)
|
||||
- Add `executeId` to job statuses in [job/list](/rc/#job-list) (Nikolay Kiryanov)
|
||||
- `config/unlock`: rename parameter to `configPassword`, accepting the old name as well (Nick Craig-Wood)
|
||||
- serve http: Download folders as zip (dougal)
|
||||
- Bug Fixes
|
||||
- build
|
||||
- Fix tls: failed to verify certificate: x509: negative serial number (Nick Craig-Wood)
|
||||
- march
|
||||
- Fix `--no-traverse` being very slow (Nick Craig-Wood)
|
||||
- serve s3: Fix log output to remove the EXTRA messages (iTrooz)
|
||||
- Mount
|
||||
- Windows: improve error message on missing WinFSP (divinity76)
|
||||
- Local
|
||||
- Add `--skip-specials` to ignore special files (Adam Dinwoodie)
|
||||
- Azure Blob
|
||||
- Add ListP interface (dougal)
|
||||
- Azurefiles
|
||||
- Add ListP interface (Nick Craig-Wood)
|
||||
- B2
|
||||
- Add ListP interface (dougal)
|
||||
- Add Server-Side encryption support (fries1234)
|
||||
- Fix "expected a FileSseMode but found: ''" (dougal)
|
||||
- Allow individual old versions to be deleted with `--b2-versions` (dougal)
|
||||
- Box
|
||||
- Add ListP interface (Nick Craig-Wood)
|
||||
- Allow configuration with config file contents (Dominik Sander)
|
||||
- Compress
|
||||
- Add zstd compression (Alex)
|
||||
- Drive
|
||||
- Add ListP interface (Nick Craig-Wood)
|
||||
- Dropbox
|
||||
- Add ListP interface (Nick Craig-Wood)
|
||||
- Fix error moving just created objects (Nick Craig-Wood)
|
||||
- FTP
|
||||
- Fix SOCKS proxy support (dougal)
|
||||
- Fix transfers from servers that return 250 ok messages (jijamik)
|
||||
- Google Cloud Storage
|
||||
- Add ListP interface (dougal)
|
||||
- Fix `--gcs-storage-class` to work with server side copy for objects (Riaz Arbi)
|
||||
- HTTP
|
||||
- Add basic metadata and provide it via serve (Oleg Kunitsyn)
|
||||
- Jottacloud
|
||||
- Add support for Let's Go Cloud (from MediaMarkt) as a whitelabel service (albertony)
|
||||
- Add support for MediaMarkt Cloud as a whitelabel service (albertony)
|
||||
- Added support for traditional oauth authentication for the main service as well (albertony)
|
||||
- Abort attempts to run unsupported rclone authorize command (albertony)
|
||||
- Improved token refresh handling (albertony)
|
||||
- Fix legacy authentication (albertony)
|
||||
- Fix authentication for whitelabel services from Elkjøp subsidiaries (albertony)
|
||||
- Mega
|
||||
- Implement 2FA login (iTrooz)
|
||||
- Memory
|
||||
- Add ListP interface (dougal)
|
||||
- Onedrive
|
||||
- Add ListP interface (Nick Craig-Wood)
|
||||
- Oracle Object Storage
|
||||
- Add ListP interface (dougal)
|
||||
- Pcloud
|
||||
- Add ListP interface (Nick Craig-Wood)
|
||||
- Proton Drive
|
||||
- Automated 2FA login with OTP secret key (Microscotch)
|
||||
- S3
|
||||
- Make it easier to add new S3 providers (dougal)
|
||||
- Add `--s3-use-data-integrity-protections` quirk to fix BadDigest errors with Alibaba and Tencent (hunshcn)
|
||||
- Add support for `--upload-header`, `If-Match` and `If-None-Match` (Sean Turner)
|
||||
- Fix single file copying behavior with low permission (hunshcn)
|
||||
- SFTP
|
||||
- Fix zombie SSH processes with `--sftp-ssh` (Copilot)
|
||||
- Smb
|
||||
- Optimize smb mount performance by avoiding stat checks during initialization (Sudipto Baral)
|
||||
- Swift
|
||||
- Add ListP interface (dougal)
|
||||
- If storage_policy isn't set, use the root container's policy (Andrew Ruthven)
|
||||
- Report disk usage in segment containers (Andrew Ruthven)
|
||||
- Ulozto
|
||||
- Implement the About functionality (Lukas Krejci)
|
||||
- Fix downloads returning HTML error page (aliaj1)
|
||||
- WebDAV
|
||||
- Optimize bearer token fetching with singleflight (hunshcn)
|
||||
- Add ListP interface (Nick Craig-Wood)
|
||||
- Use SpaceSepList to parse bearer token command (hunshcn)
|
||||
- Add `Access-Control-Max-Age` header for CORS preflight caching (viocha)
|
||||
- Fix out of memory with sharepoint-ntlm when uploading large files (Nick Craig-Wood)
|
||||
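As a small illustration of the `lsf` time formats entry above, a minimal sketch (it assumes the existing `--format` and `--time-format` flags of `rclone lsf`, where `t`, `s` and `p` select the time, size and path columns):

```sh
# List modification time (as epoch nanoseconds), size and path for each entry
rclone lsf --format "tsp" --time-format unixnano remote:path
```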
|
||||
## v1.71.2 - 2025-10-20
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.71.1...v1.71.2)
|
||||
|
||||
@@ -356,22 +356,22 @@ Properties:
|
||||
- Type: string
|
||||
- Default: "md5"
|
||||
- Examples:
|
||||
- "none"
|
||||
- Pass any hash supported by wrapped remote for non-chunked files.
|
||||
- Return nothing otherwise.
|
||||
- "md5"
|
||||
- MD5 for composite files.
|
||||
- "sha1"
|
||||
- SHA1 for composite files.
|
||||
- "md5all"
|
||||
- MD5 for all files.
|
||||
- "sha1all"
|
||||
- SHA1 for all files.
|
||||
- "md5quick"
|
||||
- Copying a file to chunker will request MD5 from the source.
|
||||
- Falling back to SHA1 if unsupported.
|
||||
- "sha1quick"
|
||||
- Similar to "md5quick" but prefers SHA1 over MD5.
|
||||
- "none"
|
||||
- Pass any hash supported by wrapped remote for non-chunked files.
|
||||
- Return nothing otherwise.
|
||||
- "md5"
|
||||
- MD5 for composite files.
|
||||
- "sha1"
|
||||
- SHA1 for composite files.
|
||||
- "md5all"
|
||||
- MD5 for all files.
|
||||
- "sha1all"
|
||||
- SHA1 for all files.
|
||||
- "md5quick"
|
||||
- Copying a file to chunker will request MD5 from the source.
|
||||
- Falling back to SHA1 if unsupported.
|
||||
- "sha1quick"
|
||||
- Similar to "md5quick" but prefers SHA1 over MD5.
|
||||
|
||||
### Advanced options
|
||||
|
||||
@@ -421,13 +421,13 @@ Properties:
|
||||
- Type: string
|
||||
- Default: "simplejson"
|
||||
- Examples:
|
||||
- "none"
|
||||
- Do not use metadata files at all.
|
||||
- Requires hash type "none".
|
||||
- "simplejson"
|
||||
- Simple JSON supports hash sums and chunk validation.
|
||||
-
|
||||
- It has the following fields: ver, size, nchunks, md5, sha1.
|
||||
- "none"
|
||||
- Do not use metadata files at all.
|
||||
- Requires hash type "none".
|
||||
- "simplejson"
|
||||
- Simple JSON supports hash sums and chunk validation.
|
||||
-
|
||||
- It has the following fields: ver, size, nchunks, md5, sha1.
|
||||
|
||||
#### --chunker-fail-hard
|
||||
|
||||
@@ -440,10 +440,10 @@ Properties:
|
||||
- Type: bool
|
||||
- Default: false
|
||||
- Examples:
|
||||
- "true"
|
||||
- Report errors and abort current command.
|
||||
- "false"
|
||||
- Warn user, skip incomplete file and proceed.
|
||||
- "true"
|
||||
- Report errors and abort current command.
|
||||
- "false"
|
||||
- Warn user, skip incomplete file and proceed.
|
||||
|
||||
#### --chunker-transactions
|
||||
|
||||
@@ -456,19 +456,19 @@ Properties:
|
||||
- Type: string
|
||||
- Default: "rename"
|
||||
- Examples:
|
||||
- "rename"
|
||||
- Rename temporary files after a successful transaction.
|
||||
- "norename"
|
||||
- Leave temporary file names and write transaction ID to metadata file.
|
||||
- Metadata is required for no rename transactions (meta format cannot be "none").
|
||||
- If you are using norename transactions you should be careful not to downgrade Rclone
|
||||
- as older versions of Rclone don't support this transaction style and will misinterpret
|
||||
- files manipulated by norename transactions.
|
||||
- This method is EXPERIMENTAL, don't use on production systems.
|
||||
- "auto"
|
||||
- Rename or norename will be used depending on capabilities of the backend.
|
||||
- If meta format is set to "none", rename transactions will always be used.
|
||||
- This method is EXPERIMENTAL, don't use on production systems.
|
||||
- "rename"
|
||||
- Rename temporary files after a successful transaction.
|
||||
- "norename"
|
||||
- Leave temporary file names and write transaction ID to metadata file.
|
||||
- Metadata is required for no rename transactions (meta format cannot be "none").
|
||||
- If you are using norename transactions you should be careful not to downgrade Rclone
|
||||
- as older versions of Rclone don't support this transaction style and will misinterpret
|
||||
- files manipulated by norename transactions.
|
||||
- This method is EXPERIMENTAL, don't use on production systems.
|
||||
- "auto"
|
||||
- Rename or norename will be used depending on capabilities of the backend.
|
||||
- If meta format is set to "none", rename transactions will always be used.
|
||||
- This method is EXPERIMENTAL, don't use on production systems.
|
||||
|
||||
#### --chunker-description
|
||||
|
||||
|
||||
@@ -15,8 +15,6 @@ mounting them, listing them in lots of different ways.
|
||||
See the home page (https://rclone.org/) for installation, usage,
|
||||
documentation, changelog and configuration walkthroughs.
|
||||
|
||||
|
||||
|
||||
```
|
||||
rclone [flags]
|
||||
```
|
||||
@@ -26,6 +24,8 @@ rclone [flags]
|
||||
```
|
||||
--alias-description string Description of the remote
|
||||
--alias-remote string Remote or path to alias
|
||||
--archive-description string Description of the remote
|
||||
--archive-remote string Remote to wrap to read archives from
|
||||
--ask-password Allow prompt for password for encrypted configuration (default true)
|
||||
--auto-confirm If enabled, do not request console confirmation
|
||||
--azureblob-access-tier string Access tier of blob: hot, cool, cold or archive
|
||||
@@ -105,6 +105,10 @@ rclone [flags]
|
||||
--b2-hard-delete Permanently delete files on remote removal, otherwise hide files
|
||||
--b2-key string Application Key
|
||||
--b2-lifecycle int Set the number of days deleted files should be kept when creating a bucket
|
||||
--b2-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in B2
|
||||
--b2-sse-customer-key string To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
|
||||
--b2-sse-customer-key-base64 string To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
|
||||
--b2-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional)
|
||||
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging
|
||||
--b2-upload-concurrency int Concurrency for multipart uploads (default 4)
|
||||
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
|
||||
@@ -181,7 +185,7 @@ rclone [flags]
|
||||
--combine-upstreams SpaceSepList Upstreams for combining
|
||||
--compare-dest stringArray Include additional server-side paths during comparison
|
||||
--compress-description string Description of the remote
|
||||
--compress-level int GZIP compression level (-2 to 9) (default -1)
|
||||
--compress-level string GZIP (levels -2 to 9):
|
||||
--compress-mode string Compression mode (default "gzip")
|
||||
--compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
|
||||
--compress-remote string Remote to compress
|
||||
@@ -549,6 +553,7 @@ rclone [flags]
|
||||
--max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
|
||||
--max-stats-groups int Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer (default off)
|
||||
--mega-2fa string The 2FA code of your MEGA account if the account is set up with one
|
||||
--mega-debug Output more debug from Mega
|
||||
--mega-description string Description of the remote
|
||||
--mega-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
|
||||
@@ -715,6 +720,7 @@ rclone [flags]
|
||||
--protondrive-encoding Encoding The encoding for the backend (default Slash,LeftSpace,RightSpace,InvalidUtf8,Dot)
|
||||
--protondrive-mailbox-password string The mailbox password of your two-password proton account (obscured)
|
||||
--protondrive-original-file-size Return the file size before encryption (default true)
|
||||
--protondrive-otp-secret-key string The OTP secret key (obscured)
|
||||
--protondrive-password string The password of your proton account (obscured)
|
||||
--protondrive-replace-existing-draft Create a new revision when filename conflict is detected
|
||||
--protondrive-username string The username of your proton account
|
||||
@@ -831,6 +837,7 @@ rclone [flags]
|
||||
--s3-use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header (default unset)
|
||||
--s3-use-already-exists Tristate Set if rclone should report BucketAlreadyExists errors on bucket creation (default unset)
|
||||
--s3-use-arn-region If true, enables arn region support for the service
|
||||
--s3-use-data-integrity-protections Tristate If true use AWS S3 data integrity protections (default unset)
|
||||
--s3-use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support)
|
||||
--s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
|
||||
--s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset)
|
||||
@@ -915,6 +922,7 @@ rclone [flags]
|
||||
--sia-user-agent string Siad User Agent (default "Sia-Agent")
|
||||
--size-only Skip based on size only, not modtime or checksum
|
||||
--skip-links Don't warn about skipped symlinks
|
||||
--skip-specials Don't warn about skipped pipes, sockets and device objects
|
||||
--smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
|
||||
--smb-description string Description of the remote
|
||||
--smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
|
||||
@@ -1007,15 +1015,11 @@ rclone [flags]
|
||||
--union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
|
||||
--union-upstreams string List of space separated upstreams
|
||||
-u, --update Skip files that are newer on the destination
|
||||
--uptobox-access-token string Your access token
|
||||
--uptobox-description string Description of the remote
|
||||
--uptobox-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
|
||||
--uptobox-private Set to make uploaded files private
|
||||
--use-cookies Enable session cookiejar
|
||||
--use-json-log Use json log format
|
||||
--use-mmap Use mmap allocator (see docs)
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.71.0")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.72.0")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
-V, --version Print the version number
|
||||
--webdav-auth-redirect Preserve authentication on redirect
|
||||
@@ -1057,7 +1061,11 @@ rclone [flags]
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone about](/commands/rclone_about/) - Get quota information from the remote.
|
||||
* [rclone archive](/commands/rclone_archive/) - Perform an action on an archive.
|
||||
* [rclone authorize](/commands/rclone_authorize/) - Remote authorization.
|
||||
* [rclone backend](/commands/rclone_backend/) - Run a backend-specific command.
|
||||
* [rclone bisync](/commands/rclone_bisync/) - Perform bidirectional synchronization between two paths.
|
||||
@@ -1111,3 +1119,5 @@ rclone [flags]
|
||||
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
|
||||
* [rclone version](/commands/rclone_version/) - Show the version number.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -15,40 +15,46 @@ output. The output is typically used, free, quota and trash contents.
|
||||
|
||||
E.g. Typical output from `rclone about remote:` is:
|
||||
|
||||
Total: 17 GiB
|
||||
Used: 7.444 GiB
|
||||
Free: 1.315 GiB
|
||||
Trashed: 100.000 MiB
|
||||
Other: 8.241 GiB
|
||||
```text
|
||||
Total: 17 GiB
|
||||
Used: 7.444 GiB
|
||||
Free: 1.315 GiB
|
||||
Trashed: 100.000 MiB
|
||||
Other: 8.241 GiB
|
||||
```
|
||||
|
||||
Where the fields are:
|
||||
|
||||
* Total: Total size available.
|
||||
* Used: Total size used.
|
||||
* Free: Total space available to this user.
|
||||
* Trashed: Total space used by trash.
|
||||
* Other: Total amount in other storage (e.g. Gmail, Google Photos).
|
||||
* Objects: Total number of objects in the storage.
|
||||
- Total: Total size available.
|
||||
- Used: Total size used.
|
||||
- Free: Total space available to this user.
|
||||
- Trashed: Total space used by trash.
|
||||
- Other: Total amount in other storage (e.g. Gmail, Google Photos).
|
||||
- Objects: Total number of objects in the storage.
|
||||
|
||||
All sizes are in number of bytes.
|
||||
|
||||
Applying a `--full` flag to the command prints the bytes in full, e.g.
|
||||
|
||||
Total: 18253611008
|
||||
Used: 7993453766
|
||||
Free: 1411001220
|
||||
Trashed: 104857602
|
||||
Other: 8849156022
|
||||
```text
|
||||
Total: 18253611008
|
||||
Used: 7993453766
|
||||
Free: 1411001220
|
||||
Trashed: 104857602
|
||||
Other: 8849156022
|
||||
```
|
||||
|
||||
A `--json` flag generates conveniently machine-readable output, e.g.
|
||||
|
||||
{
|
||||
"total": 18253611008,
|
||||
"used": 7993453766,
|
||||
"trashed": 104857602,
|
||||
"other": 8849156022,
|
||||
"free": 1411001220
|
||||
}
|
||||
```json
|
||||
{
|
||||
"total": 18253611008,
|
||||
"used": 7993453766,
|
||||
"trashed": 104857602,
|
||||
"other": 8849156022,
|
||||
"free": 1411001220
|
||||
}
|
||||
```
|
||||
|
||||
Not all backends print all fields. Information is not included if it is not
|
||||
provided by a backend. Where the value is unlimited it is omitted.
|
||||
@@ -56,7 +62,6 @@ provided by a backend. Where the value is unlimited it is omitted.
|
||||
Some backends do not support the `rclone about` command at all;
see the complete list in the [documentation](https://rclone.org/overview/#optional-features).
|
||||
|
||||
|
||||
```
|
||||
rclone about remote: [flags]
|
||||
```
|
||||
@@ -73,5 +78,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
47 docs/content/commands/rclone_archive.md (new file)
@@ -0,0 +1,47 @@
|
||||
---
|
||||
title: "rclone archive"
|
||||
description: "Perform an action on an archive."
|
||||
versionIntroduced: v1.72
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/archive/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone archive
|
||||
|
||||
Perform an action on an archive.
|
||||
|
||||
## Synopsis
|
||||
|
||||
Perform an action on an archive. Requires the use of a
subcommand to specify the action, e.g.
|
||||
|
||||
rclone archive list remote:file.zip
|
||||
|
||||
Each subcommand has its own options which you can see in their help.
|
||||
|
||||
See [rclone archive create](/commands/rclone_archive_create/) for the
|
||||
archive formats supported.
|
||||
|
||||
|
||||
```
|
||||
rclone archive <action> [opts] <source> [<destination>] [flags]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
-h, --help help for archive
|
||||
```
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
* [rclone archive create](/commands/rclone_archive_create/) - Archive source file(s) to destination.
|
||||
* [rclone archive extract](/commands/rclone_archive_extract/) - Extract archives from source to destination.
|
||||
* [rclone archive list](/commands/rclone_archive_list/) - List archive contents from source.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
95 docs/content/commands/rclone_archive_create.md (new file)
@@ -0,0 +1,95 @@
|
||||
---
|
||||
title: "rclone archive create"
|
||||
description: "Archive source file(s) to destination."
|
||||
versionIntroduced: v1.72
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/archive/create/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone archive create
|
||||
|
||||
Archive source file(s) to destination.
|
||||
|
||||
## Synopsis
|
||||
|
||||
|
||||
Creates an archive from the files in source:path and saves the archive to
|
||||
dest:path. If dest:path is missing, it will write to the console.
|
||||
|
||||
The valid formats for the `--format` flag are listed below. If
|
||||
`--format` is not set rclone will guess it from the extension of dest:path.
|
||||
|
||||
| Format | Extensions |
|
||||
|:-------|:-----------|
|
||||
| zip | .zip |
|
||||
| tar | .tar |
|
||||
| tar.gz | .tar.gz, .tgz, .taz |
|
||||
| tar.bz2| .tar.bz2, .tb2, .tbz, .tbz2, .tz2 |
|
||||
| tar.lz | .tar.lz |
|
||||
| tar.lz4| .tar.lz4 |
|
||||
| tar.xz | .tar.xz, .txz |
|
||||
| tar.zst| .tar.zst, .tzst |
|
||||
| tar.br | .tar.br |
|
||||
| tar.sz | .tar.sz |
|
||||
| tar.mz | .tar.mz |
|
||||
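For example, to force a particular format rather than relying on the extension of dest:path, something like this should work (a sketch using the `--format` flag listed under Options below):

```
rclone archive create --format tar.gz /sourcedir remote:backups/sourcedir.archive
```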
|
||||
The `--prefix` and `--full-path` flags control the prefix for the files
|
||||
in the archive.
|
||||
|
||||
If the flag `--full-path` is set then the files will have the full source
|
||||
path as the prefix.
|
||||
|
||||
If the flag `--prefix=<value>` is set then the files will have
|
||||
`<value>` as prefix. It's possible to create invalid file names with
|
||||
`--prefix=<value>`, so use it with caution. The `--prefix` flag takes
priority over `--full-path`.
|
||||
|
||||
Given a directory `/sourcedir` with the following:
|
||||
|
||||
file1.txt
|
||||
dir1/file2.txt
|
||||
|
||||
Running the command `rclone archive create /sourcedir /dest.tar.gz`
|
||||
will make an archive with the contents:
|
||||
|
||||
file1.txt
|
||||
dir1/
|
||||
dir1/file2.txt
|
||||
|
||||
Running the command `rclone archive create --full-path /sourcedir /dest.tar.gz`
|
||||
will make an archive with the contents:
|
||||
|
||||
sourcedir/file1.txt
|
||||
sourcedir/dir1/
|
||||
sourcedir/dir1/file2.txt
|
||||
|
||||
Running the command `rclone archive create --prefix=my_new_path /sourcedir /dest.tar.gz`
|
||||
will make an archive with the contents:
|
||||
|
||||
my_new_path/file1.txt
|
||||
my_new_path/dir1/
|
||||
my_new_path/dir1/file2.txt
|
||||
|
||||
|
||||
```
|
||||
rclone archive create [flags] <source> [<destination>]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
--format string Create the archive with format or guess from extension.
|
||||
--full-path Set prefix for files in archive to source path
|
||||
-h, --help help for create
|
||||
--prefix string Set prefix for files in archive to entered value or source path
|
||||
```
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone archive](/commands/rclone_archive/) - Perform an action on an archive.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
81 docs/content/commands/rclone_archive_extract.md (new file)
@@ -0,0 +1,81 @@
|
||||
---
|
||||
title: "rclone archive extract"
|
||||
description: "Extract archives from source to destination."
|
||||
versionIntroduced: v1.72
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/archive/extract/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone archive extract
|
||||
|
||||
Extract archives from source to destination.
|
||||
|
||||
## Synopsis
|
||||
|
||||
|
||||
|
||||
Extract the archive contents to a destination directory auto-detecting
|
||||
the format. See [rclone archive create](/commands/rclone_archive_create/)
|
||||
for the archive formats supported.
|
||||
|
||||
For example on this archive:
|
||||
|
||||
```
|
||||
$ rclone archive list --long remote:archive.zip
|
||||
6 2025-10-30 09:46:23.000000000 file.txt
|
||||
0 2025-10-30 09:46:57.000000000 dir/
|
||||
4 2025-10-30 09:46:57.000000000 dir/bye.txt
|
||||
```
|
||||
|
||||
You can run extract like this
|
||||
|
||||
```
|
||||
$ rclone archive extract remote:archive.zip remote:extracted
|
||||
```
|
||||
|
||||
Which gives this result
|
||||
|
||||
```
|
||||
$ rclone tree remote:extracted
|
||||
/
|
||||
├── dir
|
||||
│ └── bye.txt
|
||||
└── file.txt
|
||||
```
|
||||
|
||||
The source or destination or both can be local or remote.
|
||||
|
||||
Filters can be used to only extract certain files:
|
||||
|
||||
```
|
||||
$ rclone archive extract archive.zip partial --include "bye.*"
|
||||
$ rclone tree partial
|
||||
/
|
||||
└── dir
|
||||
└── bye.txt
|
||||
```
|
||||
|
||||
The [archive backend](/archive/) can also be used to extract files. It
can additionally mount archives read-only, but it supports a
different set of archive formats from the archive commands.
|
||||
|
||||
|
||||
```
|
||||
rclone archive extract [flags] <source> <destination>
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
-h, --help help for extract
|
||||
```
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone archive](/commands/rclone_archive/) - Perform an action on an archive.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
96 docs/content/commands/rclone_archive_list.md (new file)
@@ -0,0 +1,96 @@
|
||||
---
|
||||
title: "rclone archive list"
|
||||
description: "List archive contents from source."
|
||||
versionIntroduced: v1.72
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/archive/list/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone archive list
|
||||
|
||||
List archive contents from source.
|
||||
|
||||
## Synopsis
|
||||
|
||||
|
||||
List the contents of an archive to the console, auto-detecting the
|
||||
format. See [rclone archive create](/commands/rclone_archive_create/)
|
||||
for the archive formats supported.
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
$ rclone archive list remote:archive.zip
|
||||
6 file.txt
|
||||
0 dir/
|
||||
4 dir/bye.txt
|
||||
```
|
||||
|
||||
Or with `--long` flag for more info:
|
||||
|
||||
```
|
||||
$ rclone archive list --long remote:archive.zip
|
||||
6 2025-10-30 09:46:23.000000000 file.txt
|
||||
0 2025-10-30 09:46:57.000000000 dir/
|
||||
4 2025-10-30 09:46:57.000000000 dir/bye.txt
|
||||
```
|
||||
|
||||
Or with `--plain` flag which is useful for scripting:
|
||||
|
||||
```
|
||||
$ rclone archive list --plain /path/to/archive.zip
|
||||
file.txt
|
||||
dir/
|
||||
dir/bye.txt
|
||||
```
|
||||
|
||||
Or with `--dirs-only`:
|
||||
|
||||
```
|
||||
$ rclone archive list --plain --dirs-only /path/to/archive.zip
|
||||
dir/
|
||||
```
|
||||
|
||||
Or with `--files-only`:
|
||||
|
||||
```
|
||||
$ rclone archive list --plain --files-only /path/to/archive.zip
|
||||
file.txt
|
||||
dir/bye.txt
|
||||
```
|
||||
|
||||
Filters may also be used:
|
||||
|
||||
```
|
||||
$ rclone archive list --long archive.zip --include "bye.*"
|
||||
4 2025-10-30 09:46:57.000000000 dir/bye.txt
|
||||
```
|
||||
|
||||
The [archive backend](/archive/) can also be used to list files. It
can additionally mount archives read-only, but it supports a
different set of archive formats from the archive commands.
|
||||
|
||||
|
||||
```
|
||||
rclone archive list [flags] <source>
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
--dirs-only Only list directories
|
||||
--files-only Only list files
|
||||
-h, --help help for list
|
||||
--long List extra attributes
|
||||
--plain Only list file names
|
||||
```
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone archive](/commands/rclone_archive/) - Perform an action on an archive.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
@@ -11,21 +11,23 @@ Remote authorization.
|
||||
## Synopsis
|
||||
|
||||
Remote authorization. Used to authorize a remote or headless
|
||||
rclone from a machine with a browser - use as instructed by
|
||||
rclone config.
|
||||
rclone from a machine with a browser. Use as instructed by rclone config.
|
||||
See also the [remote setup documentation](/remote_setup).
|
||||
|
||||
The command requires 1-3 arguments:
|
||||
- fs name (e.g., "drive", "s3", etc.)
|
||||
- Either a base64 encoded JSON blob obtained from a previous rclone config session
|
||||
- Or a client_id and client_secret pair obtained from the remote service
|
||||
|
||||
- Name of a backend (e.g. "drive", "s3")
|
||||
- Either a base64 encoded JSON blob obtained from a previous rclone config session
|
||||
- Or a client_id and client_secret pair obtained from the remote service
|
||||
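For example, a typical invocation looks something like this (a sketch; use the exact command that `rclone config` prints for you, the client_id and client_secret values here are placeholders):

```console
rclone authorize "drive"
rclone authorize "drive" "your_client_id" "your_client_secret"
```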
|
||||
Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.
|
||||
|
||||
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.
|
||||
Use --template to generate HTML output via a custom Go template. If a blank
|
||||
string is provided as an argument to this flag, the default template is used.
|
||||
|
||||
```
|
||||
rclone authorize <fs name> [base64_json_blob | client_id client_secret] [flags]
|
||||
rclone authorize <backendname> [base64_json_blob | client_id client_secret] [flags]
|
||||
```
|
||||
|
||||
## Options
|
||||
@@ -40,5 +42,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -16,27 +16,34 @@ see the backend docs for definitions.
|
||||
|
||||
You can discover what commands a backend implements by using
|
||||
|
||||
rclone backend help remote:
|
||||
rclone backend help <backendname>
|
||||
```console
|
||||
rclone backend help remote:
|
||||
rclone backend help <backendname>
|
||||
```
|
||||
|
||||
You can also discover information about the backend using (see
|
||||
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
|
||||
for more info).
|
||||
|
||||
rclone backend features remote:
|
||||
```console
|
||||
rclone backend features remote:
|
||||
```
|
||||
|
||||
Pass options to the backend command with -o. This should be key=value or key, e.g.:
|
||||
|
||||
rclone backend stats remote:path stats -o format=json -o long
|
||||
```console
|
||||
rclone backend stats remote:path stats -o format=json -o long
|
||||
```
|
||||
|
||||
Pass arguments to the backend by placing them on the end of the line
|
||||
|
||||
rclone backend cleanup remote:path file1 file2 file3
|
||||
```console
|
||||
rclone backend cleanup remote:path file1 file2 file3
|
||||
```
|
||||
|
||||
Note: to run these commands on a running backend, see
[backend/command](/rc/#backend-command) in the rc docs.
|
||||
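For instance, the stats example above could be sent to a running rclone over the rc like this (a sketch; `backend/command` takes `command` and `fs` parameters plus `-o` options, as described in the rc docs):

```console
rclone rc backend/command command=stats fs=remote:path -o format=json
```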
|
||||
|
||||
```
|
||||
rclone backend <command> remote:path [opts] <args> [flags]
|
||||
```
|
||||
@@ -56,7 +63,7 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
Important flags useful for most commands
|
||||
|
||||
```
|
||||
```text
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
-i, --interactive Enable interactive mode
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
@@ -64,5 +71,10 @@ Important flags useful for most commands
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -16,18 +16,19 @@ Perform bidirectional synchronization between two paths.
|
||||
bidirectional cloud sync solution in rclone.
|
||||
It retains the Path1 and Path2 filesystem listings from the prior run.
|
||||
On each successive run it will:
|
||||
|
||||
- list files on Path1 and Path2, and check for changes on each side.
|
||||
Changes include `New`, `Newer`, `Older`, and `Deleted` files.
|
||||
- Propagate changes on Path1 to Path2, and vice-versa.
|
||||
|
||||
Bisync is considered an **advanced command**, so use with care.
|
||||
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
|
||||
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
|
||||
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
|
||||
(especially the [Limitations](https://rclone.org/bisync/#limitations) section)
|
||||
before using, or data loss can result. Questions can be asked in the
|
||||
[Rclone Forum](https://forum.rclone.org/).
|
||||
|
||||
See [full bisync description](https://rclone.org/bisync/) for details.
|
||||
|
||||
|
||||
```
|
||||
rclone bisync remote1:path1 remote2:path2 [flags]
|
||||
```
|
||||
@@ -69,7 +70,7 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
Flags for anything which can copy a file
|
||||
|
||||
```
|
||||
```text
|
||||
--check-first Do all the checks before starting transfers
|
||||
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
|
||||
--compare-dest stringArray Include additional server-side paths during comparison
|
||||
@@ -110,7 +111,7 @@ Flags for anything which can copy a file
|
||||
|
||||
Important flags useful for most commands
|
||||
|
||||
```
|
||||
```text
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
-i, --interactive Enable interactive mode
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
@@ -120,7 +121,7 @@ Important flags useful for most commands
|
||||
|
||||
Flags for filtering directory listings
|
||||
|
||||
```
|
||||
```text
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
|
||||
@@ -148,5 +149,10 @@ Flags for filtering directory listings
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -14,15 +14,21 @@ Sends any files to standard output.
|
||||
|
||||
You can use it like this to output a single file
|
||||
|
||||
rclone cat remote:path/to/file
|
||||
```sh
|
||||
rclone cat remote:path/to/file
|
||||
```
|
||||
|
||||
Or like this to output any file in dir or its subdirectories.
|
||||
|
||||
rclone cat remote:path/to/dir
|
||||
```sh
|
||||
rclone cat remote:path/to/dir
|
||||
```
|
||||
|
||||
Or like this to output any .txt files in dir or its subdirectories.
|
||||
|
||||
rclone --include "*.txt" cat remote:path/to/dir
|
||||
```sh
|
||||
rclone --include "*.txt" cat remote:path/to/dir
|
||||
```
|
||||
|
||||
Use the `--head` flag to print characters only at the start, `--tail` for
|
||||
the end and `--offset` and `--count` to print a section in the middle.
|
||||
@@ -33,14 +39,17 @@ Use the `--separator` flag to print a separator value between files. Be sure to
|
||||
shell-escape special characters. For example, to print a newline between
|
||||
files, use:
|
||||
|
||||
* bash:
|
||||
- bash:
|
||||
|
||||
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|
||||
```sh
|
||||
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|
||||
```
|
||||
|
||||
* powershell:
|
||||
|
||||
rclone --include "*.txt" --separator "`n" cat remote:path/to/dir
|
||||
- powershell:
|
||||
|
||||
```powershell
|
||||
rclone --include "*.txt" --separator "`n" cat remote:path/to/dir
|
||||
```
|
||||
|
||||
```
|
||||
rclone cat remote:path [flags]
|
||||
@@ -65,7 +74,7 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
Flags for filtering directory listings
|
||||
|
||||
```
|
||||
```text
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
|
||||
@@ -95,12 +104,17 @@ Flags for filtering directory listings
|
||||
|
||||
Flags for listing directories
|
||||
|
||||
```
|
||||
```text
|
||||
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
|
||||
--fast-list Use recursive list if available; uses more memory but fewer transactions
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -52,7 +52,6 @@ you what happened to it. These are reminiscent of diff files.
|
||||
The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
|
||||
option for more information.
|
||||
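For example, to raise the number of parallel checks (a sketch using the global `--checkers` flag):

```
rclone check source:path dest:path --checkers 16
```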
|
||||
|
||||
```
|
||||
rclone check source:path dest:path [flags]
|
||||
```
|
||||
@@ -79,7 +78,7 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
Flags used for check commands
|
||||
|
||||
```
|
||||
```text
|
||||
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
|
||||
```
|
||||
|
||||
@@ -87,7 +86,7 @@ Flags used for check commands
|
||||
|
||||
Flags for filtering directory listings
|
||||
|
||||
```
|
||||
```text
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
|
||||
@@ -117,12 +116,17 @@ Flags for filtering directory listings
|
||||
|
||||
Flags for listing directories
|
||||
|
||||
```
|
||||
```text
|
||||
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
|
||||
--fast-list Use recursive list if available; uses more memory but fewer transactions
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -47,7 +47,6 @@ you what happened to it. These are reminiscent of diff files.
|
||||
The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
|
||||
option for more information.
|
||||
|
||||
|
||||
```
|
||||
rclone checksum <hash> sumfile dst:path [flags]
|
||||
```
|
||||
@@ -73,7 +72,7 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
Flags for filtering directory listings
|
||||
|
||||
```
|
||||
```text
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
|
||||
@@ -103,12 +102,17 @@ Flags for filtering directory listings
|
||||
|
||||
Flags for listing directories
|
||||
|
||||
```
|
||||
```text
|
||||
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
|
||||
--fast-list Use recursive list if available; uses more memory but fewer transactions
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -13,7 +13,6 @@ Clean up the remote if possible.
|
||||
Clean up the remote if possible. Empty the trash or delete old file
|
||||
versions. Not supported by all remotes.
|
||||
|
||||
|
||||
```
|
||||
rclone cleanup remote:path [flags]
|
||||
```
|
||||
@@ -31,7 +30,7 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
Important flags useful for most commands
|
||||
|
||||
```
|
||||
```text
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
-i, --interactive Enable interactive mode
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
@@ -39,5 +38,10 @@ Important flags useful for most commands
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -15,7 +15,6 @@ Output completion script for a given shell.
|
||||
Generates a shell completion script for rclone.
|
||||
Run with `--help` to list the supported shells.
|
||||
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
@@ -26,9 +25,14 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
* [rclone completion bash](/commands/rclone_completion_bash/) - Output bash completion script for rclone.
|
||||
* [rclone completion fish](/commands/rclone_completion_fish/) - Output fish completion script for rclone.
|
||||
* [rclone completion powershell](/commands/rclone_completion_powershell/) - Output powershell completion script for rclone.
|
||||
* [rclone completion zsh](/commands/rclone_completion_zsh/) - Output zsh completion script for rclone.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -13,17 +13,21 @@ Output bash completion script for rclone.
|
||||
|
||||
Generates a bash shell autocompletion script for rclone.
|
||||
|
||||
By default, when run without any arguments,
|
||||
By default, when run without any arguments,
|
||||
|
||||
rclone completion bash
|
||||
```console
|
||||
rclone completion bash
|
||||
```
|
||||
|
||||
the generated script will be written to
|
||||
|
||||
/etc/bash_completion.d/rclone
|
||||
```console
|
||||
/etc/bash_completion.d/rclone
|
||||
```
|
||||
|
||||
and so rclone will probably need to be run as root, or with sudo.
|
||||
|
||||
If you supply a path to a file as the command line argument, then
|
||||
If you supply a path to a file as the command line argument, then
|
||||
the generated script will be written to that file, in which case
|
||||
you should not need root privileges.
|
||||
|
||||
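For example, to write the completion script to a file in your home directory and source it straight away (a sketch; any writable path works as the output file):

```console
rclone completion bash ~/.rclone-completion.bash
. ~/.rclone-completion.bash
```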
@@ -34,12 +38,13 @@ can logout and login again to use the autocompletion script.
|
||||
|
||||
Alternatively, you can source the script directly
|
||||
|
||||
. /path/to/my_bash_completion_scripts/rclone
|
||||
```console
|
||||
. /path/to/my_bash_completion_scripts/rclone
|
||||
```
|
||||
|
||||
and the autocompletion functionality will be added to your
|
||||
current shell.
|
||||
|
||||
|
||||
```
|
||||
rclone completion bash [output_file] [flags]
|
||||
```
|
||||
@@ -54,5 +59,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -16,19 +16,22 @@ Generates a fish autocompletion script for rclone.
|
||||
This writes to /etc/fish/completions/rclone.fish by default so will
|
||||
probably need to be run with sudo or as root, e.g.
|
||||
|
||||
sudo rclone completion fish
|
||||
```console
|
||||
sudo rclone completion fish
|
||||
```
|
||||
|
||||
Logout and login again to use the autocompletion scripts, or source
|
||||
them directly
|
||||
|
||||
. /etc/fish/completions/rclone.fish
|
||||
```console
|
||||
. /etc/fish/completions/rclone.fish
|
||||
```
|
||||
|
||||
If you supply a command line argument the script will be written
|
||||
there.
|
||||
|
||||
If output_file is "-", then the output will be written to stdout.
|
||||
|
||||
|
||||
```
|
||||
rclone completion fish [output_file] [flags]
|
||||
```
|
||||
@@ -43,5 +46,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -15,14 +15,15 @@ Generate the autocompletion script for powershell.
|
||||
|
||||
To load completions in your current shell session:
|
||||
|
||||
rclone completion powershell | Out-String | Invoke-Expression
|
||||
```console
|
||||
rclone completion powershell | Out-String | Invoke-Expression
|
||||
```
|
||||
|
||||
To load completions for every new session, add the output of the above command
|
||||
to your powershell profile.
|
||||
|
||||
If output_file is "-" or missing, then the output will be written to stdout.
|
||||
|
||||
|
||||
```
|
||||
rclone completion powershell [output_file] [flags]
|
||||
```
|
||||
@@ -37,5 +38,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -16,19 +16,22 @@ Generates a zsh autocompletion script for rclone.
|
||||
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
|
||||
probably need to be run with sudo or as root, e.g.
|
||||
|
||||
sudo rclone completion zsh
|
||||
```console
|
||||
sudo rclone completion zsh
|
||||
```
|
||||
|
||||
Logout and login again to use the autocompletion scripts, or source
|
||||
them directly
|
||||
|
||||
autoload -U compinit && compinit
|
||||
```console
|
||||
autoload -U compinit && compinit
|
||||
```
|
||||
|
||||
If you supply a command line argument the script will be written
|
||||
there.
|
||||
|
||||
If output_file is "-", then the output will be written to stdout.
|
||||
|
||||
|
||||
```
|
||||
rclone completion zsh [output_file] [flags]
|
||||
```
|
||||
@@ -43,5 +46,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -14,7 +14,6 @@ Enter an interactive configuration session where you can setup new
|
||||
remotes and manage existing ones. You may also set or remove a
|
||||
password to protect your configuration.
|
||||
|
||||
|
||||
```
|
||||
rclone config [flags]
|
||||
```
|
||||
@@ -29,6 +28,9 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
* [rclone config create](/commands/rclone_config_create/) - Create a new remote with name, type and options.
|
||||
* [rclone config delete](/commands/rclone_config_delete/) - Delete an existing remote.
|
||||
@@ -43,7 +45,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
* [rclone config reconnect](/commands/rclone_config_reconnect/) - Re-authenticates user with remote.
|
||||
* [rclone config redacted](/commands/rclone_config_redacted/) - Print redacted (decrypted) config file, or the redacted config for a single remote.
|
||||
* [rclone config show](/commands/rclone_config_show/) - Print (decrypted) config file, or the config for a single remote.
|
||||
* [rclone config string](/commands/rclone_config_string/) - Print connection string for a single remote.
|
||||
* [rclone config touch](/commands/rclone_config_touch/) - Ensure configuration file exists.
|
||||
* [rclone config update](/commands/rclone_config_update/) - Update options in an existing remote.
|
||||
* [rclone config userinfo](/commands/rclone_config_userinfo/) - Prints info about logged in user of remote.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -16,13 +16,17 @@ should be passed in pairs of `key` `value` or as `key=value`.
|
||||
For example, to make a swift remote of name myremote using auto config
|
||||
you would do:
|
||||
|
||||
rclone config create myremote swift env_auth true
|
||||
rclone config create myremote swift env_auth=true
|
||||
```sh
|
||||
rclone config create myremote swift env_auth true
|
||||
rclone config create myremote swift env_auth=true
|
||||
```
|
||||
|
||||
So for example if you wanted to configure a Google Drive remote but
|
||||
using remote authorization you would do this:
|
||||
|
||||
rclone config create mydrive drive config_is_local=false
|
||||
```sh
|
||||
rclone config create mydrive drive config_is_local=false
|
||||
```
|
||||
|
||||
Note that if the config process would normally ask a question the
|
||||
default is taken (unless `--non-interactive` is used). Each time
|
||||
@@ -50,29 +54,29 @@ it.
|
||||
|
||||
This will look something like (some irrelevant detail removed):
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"State": "*oauth-islocal,teamdrive,,",
|
||||
"Option": {
|
||||
"Name": "config_is_local",
|
||||
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
|
||||
"Default": true,
|
||||
"Examples": [
|
||||
{
|
||||
"Value": "true",
|
||||
"Help": "Yes"
|
||||
},
|
||||
{
|
||||
"Value": "false",
|
||||
"Help": "No"
|
||||
}
|
||||
],
|
||||
"Required": false,
|
||||
"IsPassword": false,
|
||||
"Type": "bool",
|
||||
"Exclusive": true,
|
||||
},
|
||||
"Error": "",
|
||||
"State": "*oauth-islocal,teamdrive,,",
|
||||
"Option": {
|
||||
"Name": "config_is_local",
|
||||
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
|
||||
"Default": true,
|
||||
"Examples": [
|
||||
{
|
||||
"Value": "true",
|
||||
"Help": "Yes"
|
||||
},
|
||||
{
|
||||
"Value": "false",
|
||||
"Help": "No"
|
||||
}
|
||||
],
|
||||
"Required": false,
|
||||
"IsPassword": false,
|
||||
"Type": "bool",
|
||||
"Exclusive": true,
|
||||
},
|
||||
"Error": "",
|
||||
}
|
||||
```
|
||||
|
||||
@@ -95,7 +99,9 @@ The keys of `Option` are used as follows:
|
||||
If `Error` is set then it should be shown to the user at the same
|
||||
time as the question.
|
||||
|
||||
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|
||||
```sh
|
||||
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|
||||
```
|
||||
|
||||
Note that when using `--continue` all passwords should be passed in
|
||||
the clear (not obscured). Any default config values should be passed
|
||||
@@ -111,7 +117,6 @@ defaults for questions as usual.
|
||||
Note that `bin/config.py` in the rclone source implements this protocol
|
||||
as a readable demonstration.
|
||||
|
||||
|
||||
```
|
||||
rclone config create name type [key value]* [flags]
|
||||
```
|
||||
@@ -134,5 +139,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -22,5 +22,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -15,7 +15,6 @@ This normally means revoking the oauth token.
|
||||
|
||||
To reconnect use "rclone config reconnect".
|
||||
|
||||
|
||||
```
|
||||
rclone config disconnect remote: [flags]
|
||||
```
|
||||
@@ -30,5 +29,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -22,5 +22,10 @@ See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.