Mirror of https://github.com/rclone/rclone.git, synced 2025-12-28 06:03:43 +00:00

Compare commits: fix-1727-o...dependabot (26 commits)

| SHA1 |
| --- |
| ed85edef50 |
| 3885800959 |
| 698373fd5c |
| 51b197c86f |
| 029ffd2761 |
| f81cd7d279 |
| 1a0a4628d7 |
| c10a4d465c |
| 3a6e07a613 |
| c36f99d343 |
| 3e21a7261b |
| fd439fab62 |
| 976aa6b416 |
| b3a0383ca3 |
| c13f129339 |
| 748d8c8957 |
| 4d379efcbb |
| e5e6a4b5ae |
| df18e8c55b |
| f4e17d8b0b |
| e5c69511bc |
| 175d4bc553 |
| 4851f1796c |
| 4ff8899b2c |
| 8f29a0b0a1 |
| 8b0e76e53b |

.github/workflows/build.yml (vendored, 4 changes)

```diff
@@ -229,7 +229,7 @@ jobs:
           cache: false

       - name: Cache
-        uses: actions/cache@v4
+        uses: actions/cache@v5
        with:
          path: |
            ~/go/pkg/mod
```

```diff
@@ -283,7 +283,7 @@ jobs:
        run: govulncheck ./...

      - name: Check Markdown format
-       uses: DavidAnson/markdownlint-cli2-action@v20
+       uses: DavidAnson/markdownlint-cli2-action@v22
        with:
          globs: |
            CONTRIBUTING.md
```

```diff
@@ -129,7 +129,7 @@ jobs:

      - name: Load Go Build Cache for Docker
        id: go-cache
-       uses: actions/cache@v4
+       uses: actions/cache@v5
        with:
          key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
```

```diff
@@ -183,7 +183,7 @@ jobs:
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload Image Digest
-       uses: actions/upload-artifact@v5
+       uses: actions/upload-artifact@v6
        with:
          name: digests-${{ env.PLATFORM }}
          path: /tmp/digests/*
```

```diff
@@ -198,7 +198,7 @@ jobs:

     steps:
       - name: Download Image Digests
-        uses: actions/download-artifact@v6
+        uses: actions/download-artifact@v7
        with:
          path: /tmp/digests
          pattern: digests-*
```

```diff
@@ -412,8 +412,8 @@ the source file in the `Help:` field:
 - The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
   and you can also run this directly, optionally with the name of a backend
   as argument to only update the docs for a specific backend.
-- **Do not** commit the updated Markdown files. This operation is run as part of
-  the release process. Since any manual changes in the autogenerated sections
+- **Do not** commit the updated Markdown files. This operation is run as part
+  of the release process. Since any manual changes in the autogenerated sections
   of the Markdown files will then be lost, we have a pull request check that
   reports error for any changes within the autogenerated sections. Should you
   have done manual changes outside of the autogenerated sections they must be
```

```diff
@@ -580,7 +580,8 @@ remote or an fs.
   make sure we can encode any path name and `rclone info` to help determine the
   encodings needed
 - `rclone purge -v TestRemote:rclone-info`
-- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
+- `rclone test info --all --remote-encoding None -vv --write-json remote.json
+  TestRemote:rclone-info`
 - `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
 - open `remote.csv` in a spreadsheet and examine
```

```diff
@@ -2,27 +2,27 @@

 Current active maintainers of rclone are:

-| Name             | GitHub ID         | Specific Responsibilities   |
-| :--------------- | :---------------- | :-------------------------- |
+| Name             | GitHub ID         | Specific Responsibilities              |
+| :--------------- | :---------------- | :------------------------------------- |
 | Nick Craig-Wood | @ncw | overall project health |
 | Stefan Breunig | @breunigs | |
 | Ishuah Kariuki | @ishuah | |
 | Remus Bunduc | @remusb | cache backend |
 | Fabian Möller | @B4dM4n | |
 | Alex Chen | @Cnly | onedrive backend |
 | Sandeep Ummadi | @sandeepkru | azureblob backend |
 | Sebastian Bünger | @buengese | jottacloud, yandex & compress backends |
 | Ivan Andreev | @ivandeex | chunker & mailru backends |
 | Max Sum | @Max-Sum | union backend |
 | Fred | @creativeprojects | seafile backend |
 | Caleb Case | @calebcase | storj backend |
 | wiserain | @wiserain | pikpak backend |
 | albertony | @albertony | |
 | Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
 | Hideo Aoyama | @boukendesho | snap packaging |
 | nielash | @nielash | bisync |
 | Dan McArdle | @dmcardle | gitannex |
 | Sam Harrison | @childish-sambino | filescom |

 ## This is a work in progress draft
```

```diff
@@ -109,6 +109,7 @@ directories to and from different cloud storage providers.
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
 - Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
+- Shade [:page_facing_up:](https://rclone.org/shade/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 - Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
 - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
```

```diff
@@ -55,6 +55,7 @@ import (
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/shade"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sia"
 	_ "github.com/rclone/rclone/backend/smb"
```

```diff
@@ -6,6 +6,7 @@ import (
 	"context"
 	"crypto/md5"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"io"
 	"path"
```

```diff
@@ -24,7 +25,8 @@ import (
 var (
 	hashType = hash.MD5
 	// the object storage is persistent
-	buckets = newBucketsInfo()
+	buckets      = newBucketsInfo()
+	errWriteOnly = errors.New("can't read when using --memory-discard")
 )

 // Register with Fs
```

```diff
@@ -33,12 +35,32 @@ func init() {
 		Name:        "memory",
 		Description: "In memory object storage system.",
 		NewFs:       NewFs,
-		Options:     []fs.Option{},
+		Options: []fs.Option{{
+			Name:     "discard",
+			Default:  false,
+			Advanced: true,
+			Help: `If set all writes will be discarded and reads will return an error
+
+If set then when files are uploaded the contents will not be saved. The
+files will appear to have been uploaded but will give an error on
+read. Files will have their MD5 sum calculated on upload which takes
+very little CPU time and allows the transfers to be checked.
+
+This can be useful for testing performance.
+
+Probably most easily used by using the connection string syntax:
+
+    :memory,discard:bucket
+`,
+		}},
 	})
 }

 // Options defines the configuration for this backend
-type Options struct{}
+type Options struct {
+	Discard bool `config:"discard"`
+}

 // Fs represents a remote memory server
 type Fs struct {
```

```diff
@@ -164,6 +186,7 @@ type objectData struct {
 	hash     string
 	mimeType string
 	data     []byte
+	size     int64
 }

 // Object describes a memory object
```

```diff
@@ -558,7 +581,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hashType {
 		return "", hash.ErrUnsupported
 	}
-	if o.od.hash == "" {
+	if o.od.hash == "" && !o.fs.opt.Discard {
 		sum := md5.Sum(o.od.data)
 		o.od.hash = hex.EncodeToString(sum[:])
 	}
```

```diff
@@ -567,7 +590,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	return int64(len(o.od.data))
+	return o.od.size
 }

 // ModTime returns the modification time of the object
```

```diff
@@ -593,6 +616,9 @@ func (o *Object) Storable() bool {

 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	if o.fs.opt.Discard {
+		return nil, errWriteOnly
+	}
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
```

@@ -624,13 +650,24 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
data, err := io.ReadAll(in)
|
||||
var data []byte
|
||||
var size int64
|
||||
var hash string
|
||||
if o.fs.opt.Discard {
|
||||
h := md5.New()
|
||||
size, err = io.Copy(h, in)
|
||||
hash = hex.EncodeToString(h.Sum(nil))
|
||||
} else {
|
||||
data, err = io.ReadAll(in)
|
||||
size = int64(len(data))
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update memory object: %w", err)
|
||||
}
|
||||
o.od = &objectData{
|
||||
data: data,
|
||||
hash: "",
|
||||
size: size,
|
||||
hash: hash,
|
||||
modTime: src.ModTime(ctx),
|
||||
mimeType: fs.MimeType(ctx, src),
|
||||
}
|
||||
|
||||
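As the help text above notes, the new option is most easily exercised through rclone's connection string syntax. A minimal sketch of a throughput test, assuming a local directory of files to upload; `bench` is a placeholder bucket name:

```console
rclone copy /path/to/testfiles :memory,discard:bench -P
```

The copy completes and each transfer is still checked against its MD5 sum, but reading any uploaded object back fails with the write-only error, so the remote is only useful for measuring upload performance.
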
```diff
@@ -222,3 +222,11 @@ type UserInfo struct {
 		} `json:"steps"`
 	} `json:"journey"`
 }
+
+// DiffResult is the response from /diff
+type DiffResult struct {
+	Result  int              `json:"result"`
+	DiffID  int64            `json:"diffid"`
+	Entries []map[string]any `json:"entries"`
+	Error   string           `json:"error"`
+}
```

```diff
@@ -171,6 +171,7 @@ type Fs struct {
 	dirCache     *dircache.DirCache // Map of directory path to directory id
 	pacer        *fs.Pacer          // pacer for API calls
 	tokenRenewer *oauthutil.Renew   // renew the token on expiry
+	lastDiffID   int64              // change tracking state for diff long-polling
 }

 // Object describes a pcloud object
```

```diff
@@ -1033,6 +1034,137 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 	return nil
 }

+// ChangeNotify implements fs.Features.ChangeNotify
+func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
+	// Start long-poll loop in background
+	go f.changeNotifyLoop(ctx, notify, ch)
+}
+
+// changeNotifyLoop contains the blocking long-poll logic.
+func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
+	// Standard polling interval
+	interval := 30 * time.Second
+
+	// Start with diffID = 0 to get the current state
+	var diffID int64
+
+	// Helper to process changes from the diff API
+	handleChanges := func(entries []map[string]any) {
+		notifiedPaths := make(map[string]bool)
+
+		for _, entry := range entries {
+			meta, ok := entry["metadata"].(map[string]any)
+			if !ok {
+				continue
+			}
+
+			// Robust extraction of ParentFolderID
+			var pid int64
+			if val, ok := meta["parentfolderid"]; ok {
+				switch v := val.(type) {
+				case float64:
+					pid = int64(v)
+				case int64:
+					pid = v
+				case int:
+					pid = int64(v)
+				}
+			}
+
+			// Resolve the path using dirCache.GetInv
+			// pCloud uses "d" prefix for directory IDs in cache, but API returns numbers
+			dirID := fmt.Sprintf("d%d", pid)
+			parentPath, ok := f.dirCache.GetInv(dirID)
+
+			if !ok {
+				// Parent not in cache, so we can ignore this change as it is outside
+				// of what the mount has seen or cares about.
+				continue
+			}
+
+			name, _ := meta["name"].(string)
+			fullPath := path.Join(parentPath, name)
+
+			// Determine EntryType (File or Directory)
+			entryType := fs.EntryObject
+			if isFolder, ok := meta["isfolder"].(bool); ok && isFolder {
+				entryType = fs.EntryDirectory
+			}
+
+			// Deduplicate notifications for this batch
+			if !notifiedPaths[fullPath] {
+				fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType)
+				notify(fullPath, entryType)
+				notifiedPaths[fullPath] = true
+			}
+		}
+	}
+
+	for {
+		// Check context and channel
+		select {
+		case <-ctx.Done():
+			return
+		case newInterval, ok := <-ch:
+			if !ok {
+				return
+			}
+			interval = newInterval
+		default:
+		}
+
+		// Setup /diff Request
+		opts := rest.Opts{
+			Method:     "GET",
+			Path:       "/diff",
+			Parameters: url.Values{},
+		}
+
+		if diffID != 0 {
+			opts.Parameters.Set("diffid", strconv.FormatInt(diffID, 10))
+			opts.Parameters.Set("block", "1")
+		} else {
+			opts.Parameters.Set("last", "0")
+		}
+
+		// Perform Long-Poll
+		// Timeout set to 90s (server usually blocks for 60s max)
+		reqCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
+		var result api.DiffResult
+
+		_, err := f.srv.CallJSON(reqCtx, &opts, nil, &result)
+		cancel()
+
+		if err != nil {
+			if errors.Is(err, context.Canceled) {
+				return
+			}
+			// Ignore timeout errors as they are normal for long-polling
+			if !errors.Is(err, context.DeadlineExceeded) {
+				fs.Infof(f, "ChangeNotify: polling error: %v. Waiting %v.", err, interval)
+				time.Sleep(interval)
+			}
+			continue
+		}
+
+		// If result is not 0, reset DiffID to resync
+		if result.Result != 0 {
+			diffID = 0
+			time.Sleep(2 * time.Second)
+			continue
+		}
+
+		if result.DiffID != 0 {
+			diffID = result.DiffID
+			f.lastDiffID = diffID
+		}
+
+		if len(result.Entries) > 0 {
+			handleChanges(result.Entries)
+		}
+	}
+}
+
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	// EU region supports SHA1 and SHA256 (but rclone doesn't
```

```diff
@@ -1401,6 +1533,7 @@ var (
 	_ fs.ListPer        = (*Fs)(nil)
 	_ fs.Abouter        = (*Fs)(nil)
 	_ fs.Shutdowner     = (*Fs)(nil)
+	_ fs.ChangeNotifier = (*Fs)(nil)
 	_ fs.Object         = (*Object)(nil)
 	_ fs.IDer           = (*Object)(nil)
 )
```

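`ChangeNotify` is consumed through the `fs.ChangeNotifier` interface asserted above: a mount or VFS-based serve detects it and invalidates cached directories as notifications arrive. A hedged usage sketch, with `pcloud:` as a placeholder remote name; `--poll-interval` supplies the duration sent down the notifier's channel:

```console
rclone mount pcloud: /mnt/pcloud --poll-interval 30s -vv
```

With `-vv` set, the `ChangeNotify: detected change in ...` debug lines from the loop above should appear whenever the long-poll returns entries.
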
```diff
@@ -1,26 +1,26 @@
 name: Linode
 description: Linode Object Storage
 endpoint:
-  nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
-  us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
-  in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
-  us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
-  eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
-  id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
-  gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
-  us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
-  es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
-  au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
-  us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
-  it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
-  us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
-  jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
-  fr-par-1.linodeobjects.com: Paris (France), fr-par-1
-  br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
-  us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
-  ap-south-1.linodeobjects.com: Singapore, ap-south-1
-  sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
-  se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
-  us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1
+  nl-ams-1.linodeobjects.com: Amsterdam, NL (nl-ams-1)
+  us-southeast-1.linodeobjects.com: Atlanta, GA, US (us-southeast-1)
+  in-maa-1.linodeobjects.com: Chennai, IN (in-maa-1)
+  us-ord-1.linodeobjects.com: Chicago, IL, US (us-ord-1)
+  eu-central-1.linodeobjects.com: Frankfurt, DE (eu-central-1)
+  id-cgk-1.linodeobjects.com: Jakarta, ID (id-cgk-1)
+  gb-lon-1.linodeobjects.com: London 2, UK (gb-lon-1)
+  us-lax-1.linodeobjects.com: Los Angeles, CA, US (us-lax-1)
+  es-mad-1.linodeobjects.com: Madrid, ES (es-mad-1)
+  us-mia-1.linodeobjects.com: Miami, FL, US (us-mia-1)
+  it-mil-1.linodeobjects.com: Milan, IT (it-mil-1)
+  us-east-1.linodeobjects.com: Newark, NJ, US (us-east-1)
+  jp-osa-1.linodeobjects.com: Osaka, JP (jp-osa-1)
+  fr-par-1.linodeobjects.com: Paris, FR (fr-par-1)
+  br-gru-1.linodeobjects.com: Sao Paulo, BR (br-gru-1)
+  us-sea-1.linodeobjects.com: Seattle, WA, US (us-sea-1)
+  ap-south-1.linodeobjects.com: Singapore, SG (ap-south-1)
+  sg-sin-1.linodeobjects.com: Singapore 2, SG (sg-sin-1)
+  se-sto-1.linodeobjects.com: Stockholm, SE (se-sto-1)
+  jp-tyo-1.linodeobjects.com: Tokyo 3, JP (jp-tyo-1)
+  us-iad-10.linodeobjects.com: Washington, DC, US (us-iad-10)
 acl: {}
 bucket_acl: true
```

```diff
@@ -2,7 +2,17 @@ name: Selectel
 description: Selectel Object Storage
+region:
+  ru-1: St. Petersburg
+  ru-3: St. Petersburg
+  ru-7: Moscow
+  gis-1: Moscow
+  kz-1: Kazakhstan
+  uz-2: Uzbekistan
 endpoint:
-  s3.ru-1.storage.selcloud.ru: Saint Petersburg
+  s3.ru-1.storage.selcloud.ru: St. Petersburg
+  s3.ru-3.storage.selcloud.ru: St. Petersburg
+  s3.ru-7.storage.selcloud.ru: Moscow
+  s3.gis-1.storage.selcloud.ru: Moscow
+  s3.kz-1.storage.selcloud.ru: Kazakhstan
+  s3.uz-2.storage.selcloud.ru: Uzbekistan
 quirks:
   list_url_encode: false
```

```diff
@@ -2928,7 +2928,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	req := s3.CopyObjectInput{
 		MetadataDirective: types.MetadataDirectiveCopy,
 	}
+	if srcObj.storageClass != nil {
+		req.StorageClass = types.StorageClass(*srcObj.storageClass)
+	}
 	// Build upload options including headers and metadata
 	ci := fs.GetConfig(ctx)
 	uploadOptions := fs.MetadataAsOpenOptions(ctx)
```

```diff
@@ -4501,7 +4503,12 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 		ACL: types.ObjectCannedACL(o.fs.opt.ACL),
 		Key: &bucketPath,
 	}
+	if tierObj, ok := src.(fs.GetTierer); ok {
+		tier := tierObj.GetTier()
+		if tier != "" {
+			ui.req.StorageClass = types.StorageClass(strings.ToUpper(tier))
+		}
+	}
 	// Fetch metadata if --metadata is in use
 	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
 	if err != nil {
```

backend/shade/api/types.go (new file, 27 lines)

```go
// Package api has type definitions for shade
package api

// ListDirResponse -------------------------------------------------
// Format from shade api
type ListDirResponse struct {
	Type  string `json:"type"`  // "file" or "tree"
	Path  string `json:"path"`  // Full path including root
	Ino   int    `json:"ino"`   // inode number
	Mtime int64  `json:"mtime"` // Modified time in milliseconds
	Ctime int64  `json:"ctime"` // Created time in milliseconds
	Size  int64  `json:"size"`  // Size in bytes
	Hash  string `json:"hash"`  // MD5 hash
	Draft bool   `json:"draft"` // Whether this is a draft file
}

// PartURL Type for multipart upload/download
type PartURL struct {
	URL     string            `json:"url"`
	Headers map[string]string `json:"headers,omitempty"`
}

// CompletedPart Type for completed parts when making a multipart upload.
type CompletedPart struct {
	ETag       string
	PartNumber int32
}
```

backend/shade/shade.go (new file, 1039 lines; diff suppressed because it is too large)

backend/shade/shade_test.go (new file, 21 lines)

```go
package shade_test

import (
	"testing"

	"github.com/rclone/rclone/backend/shade"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestShade"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*shade.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
```

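As with other rclone backends, the integration test above can be pointed at a configured remote by name. A sketch, assuming a remote section named `TestShade` exists in the rclone config:

```console
go test ./backend/shade -v -remote TestShade:
```

The `eventually_consistent_delay` extra config gives the eventually consistent backend time to settle between test steps.
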
backend/shade/upload.go (new file, 336 lines)

```go
// Multipart upload for shade

package shade

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"sort"
	"sync"

	"github.com/rclone/rclone/backend/shade/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunksize"
	"github.com/rclone/rclone/lib/multipart"
	"github.com/rclone/rclone/lib/rest"
)

var warnStreamUpload sync.Once

type shadeChunkWriter struct {
	initToken        string
	chunkSize        int64
	size             int64
	f                *Fs
	o                *Object
	completedParts   []api.CompletedPart
	completedPartsMu sync.Mutex
}

// uploadMultipart handles multipart upload for larger files
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
	chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
		Open:        o.fs,
		OpenOptions: options,
	})
	if err != nil {
		return err
	}

	var shadeWriter = chunkWriter.(*shadeChunkWriter)
	o.size = shadeWriter.size
	return nil
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}

	uploadParts := f.opt.MaxUploadParts
	if uploadParts < 1 {
		uploadParts = 1
	} else if uploadParts > maxUploadParts {
		uploadParts = maxUploadParts
	}
	size := src.Size()
	fs.FixRangeOption(options, size)

	// calculate size of parts
	chunkSize := f.opt.ChunkSize

	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
	// buffers here (default 64 MB). With a maximum number of parts (10,000) this will be a file of
	// 640 GB.
	if size == -1 {
		warnStreamUpload.Do(func() {
			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
				chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
		})
	} else {
		chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
	}

	token, err := o.fs.refreshJWTToken(ctx)
	if err != nil {
		return info, nil, fmt.Errorf("failed to get token: %w", err)
	}

	err = f.ensureParentDirectories(ctx, remote)
	if err != nil {
		return info, nil, fmt.Errorf("failed to ensure parent directories: %w", err)
	}

	fullPath := remote
	if f.root != "" {
		fullPath = path.Join(f.root, remote)
	}

	// Initiate multipart upload
	type initRequest struct {
		Path     string `json:"path"`
		PartSize int64  `json:"partSize"`
	}
	reqBody := initRequest{
		Path:     fullPath,
		PartSize: int64(chunkSize),
	}

	var initResp struct {
		Token string `json:"token"`
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart", o.fs.drive),
		RootURL: o.fs.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
		Options: options,
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		res, err := o.fs.srv.CallJSON(ctx, &opts, reqBody, &initResp)
		if err != nil {
			return res != nil && res.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err)
	}

	chunkWriter := &shadeChunkWriter{
		initToken: initResp.Token,
		chunkSize: int64(chunkSize),
		size:      size,
		f:         f,
		o:         o,
	}
	info = fs.ChunkWriterInfo{
		ChunkSize:         int64(chunkSize),
		Concurrency:       f.opt.Concurrency,
		LeavePartsOnError: false,
	}
	return info, chunkWriter, err
}

// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return 0, err
	}

	// Read chunk
	var chunk bytes.Buffer
	n, err := io.Copy(&chunk, reader)

	if n == 0 {
		return 0, nil
	}

	if err != nil {
		return 0, fmt.Errorf("failed to read chunk: %w", err)
	}
	// Get presigned URL for this part
	var partURL api.PartURL

	partOpts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart/part/%d?token=%s", s.f.drive, chunkNumber+1, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.CallJSON(ctx, &partOpts, nil, &partURL)
		if err != nil {
			return res != nil && res.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return 0, fmt.Errorf("failed to get part URL: %w", err)
	}
	opts := rest.Opts{
		Method:        "PUT",
		RootURL:       partURL.URL,
		Body:          &chunk,
		ContentType:   "",
		ContentLength: &n,
	}

	// Add headers
	var uploadRes *http.Response
	if len(partURL.Headers) > 0 {
		opts.ExtraHeaders = make(map[string]string)
		for k, v := range partURL.Headers {
			opts.ExtraHeaders[k] = v
		}
	}

	err = s.f.pacer.Call(func() (bool, error) {
		uploadRes, err = s.f.srv.Call(ctx, &opts)
		if err != nil {
			return uploadRes != nil && uploadRes.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return 0, fmt.Errorf("failed to upload part %d: %w", chunkNumber+1, err)
	}

	if uploadRes.StatusCode != http.StatusOK && uploadRes.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(uploadRes.Body)
		fs.CheckClose(uploadRes.Body, &err)
		return 0, fmt.Errorf("part upload failed with status %d: %s", uploadRes.StatusCode, string(body))
	}

	// Get ETag from response
	etag := uploadRes.Header.Get("ETag")
	fs.CheckClose(uploadRes.Body, &err)

	s.completedPartsMu.Lock()
	defer s.completedPartsMu.Unlock()
	s.completedParts = append(s.completedParts, api.CompletedPart{
		PartNumber: int32(chunkNumber + 1),
		ETag:       etag,
	})
	return n, nil
}

// Close completes the chunked writer, finalising the file.
func (s *shadeChunkWriter) Close(ctx context.Context) error {
	// Complete multipart upload
	sort.Slice(s.completedParts, func(i, j int) bool {
		return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber
	})

	type completeRequest struct {
		Parts []api.CompletedPart `json:"parts"`
	}
	var completeBody completeRequest

	if s.completedParts == nil {
		completeBody = completeRequest{Parts: []api.CompletedPart{}}
	} else {
		completeBody = completeRequest{Parts: s.completedParts}
	}

	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return err
	}

	completeOpts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart/complete?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	var response http.Response

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response)

		if err != nil && res == nil {
			return false, err
		}

		if res.StatusCode == http.StatusTooManyRequests {
			return true, err // Retry on 429
		}

		if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			body, _ := io.ReadAll(res.Body)
			return false, fmt.Errorf("complete multipart failed with status %d: %s", res.StatusCode, string(body))
		}

		return false, nil
	})

	if err != nil {
		return fmt.Errorf("failed to complete multipart upload: %w", err)
	}

	return nil
}

// Abort chunk write
//
// You can and should call Abort without calling Close.
func (s *shadeChunkWriter) Abort(ctx context.Context) error {
	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return err
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/abort/multipart?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.Call(ctx, &opts)
		if err != nil {
			fs.Debugf(s.f, "Failed to abort multipart upload: %v", err)
			return false, nil // Don't retry abort
		}
		if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			fs.Debugf(s.f, "Abort returned status %d", res.StatusCode)
		}
		return false, nil
	})
	if err != nil {
		return fmt.Errorf("failed to abort multipart upload: %w", err)
	}
	return nil
}
```

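rclone generates per-backend command line flags from a backend's `Options` struct, so the chunking parameters used above would normally be tunable at the command line. A sketch, with flag names assumed from the option fields referenced in the code (`ChunkSize`, `Concurrency`) rather than confirmed against the backend docs, and `shade:backups` as a placeholder remote:

```console
rclone copy ./large.bin shade:backups --shade-chunk-size 64M --shade-concurrency 4 -P
```
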
```diff
@@ -84,6 +84,7 @@ docs = [
     "protondrive.md",
     "seafile.md",
     "sftp.md",
+    "shade.md",
     "smb.md",
     "storj.md",
     "sugarsync.md",
```

```diff
@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)

 package cmount
```

```diff
@@ -6,7 +6,6 @@ import (
 	"io"
 	"os"
 	"path"
-	"runtime"
 	"strings"
 	"sync"
 	"sync/atomic"
```

```diff
@@ -211,12 +210,6 @@ func (fsys *FS) Readdir(dirPath string,
 	// We can't seek in directories and FUSE should know that so
 	// return an error if ofst is ever set.
 	if ofst > 0 {
-		// However openbsd doesn't seem to know this - perhaps a bug in its
-		// FUSE implementation or a bug in cgofuse?
-		// See: https://github.com/billziss-gh/cgofuse/issues/49
-		if runtime.GOOS == "openbsd" {
-			return 0
-		}
 		return -fuse.ESPIPE
 	}
```

```diff
@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)

 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
```

```diff
@@ -8,9 +8,9 @@ package cmount
 import (
 	"errors"
 	"fmt"
-	"strings"
+	"os"
+	"runtime"
+	"strings"
 	"time"

 	"github.com/rclone/rclone/cmd/mountlib"
```

```diff
@@ -59,14 +59,12 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
 	} else {
 		options = append(options, "-o", "fsname="+device)
 		options = append(options, "-o", "subtype=rclone")
-		if runtime.GOOS != "openbsd" {
-			options = append(options, "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead))
-			// This causes FUSE to supply O_TRUNC with the Open
-			// call which is more efficient for cmount. However
-			// it does not work with cgofuse on Windows with
-			// WinFSP so cmount must work with or without it.
-			options = append(options, "-o", "atomic_o_trunc")
-		}
+		options = append(options, "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead))
+		// This causes FUSE to supply O_TRUNC with the Open
+		// call which is more efficient for cmount. However
+		// it does not work with cgofuse on Windows with
+		// WinFSP so cmount must work with or without it.
+		options = append(options, "-o", "atomic_o_trunc")
 		if opt.DaemonTimeout != 0 {
 			options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(time.Duration(opt.DaemonTimeout).Seconds())))
 		}
```

```diff
@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows) && (!race || !windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) && (!race || !windows)

 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
```

```diff
@@ -1,4 +1,4 @@
-//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (openbsd && cgo && cmount) || (windows && cmount))
+//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))

 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
```

```diff
@@ -26,6 +26,10 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re
 The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
 use |-R| to make them recurse.

+List commands prefer a recursive method that uses more memory but fewer
+transactions by default. Use |--disable ListR| to suppress the behavior.
+See [|--fast-list|](/docs/#fast-list) for more details.
+
 Listing a nonexistent directory will produce an error except for
 remotes which can't have empty directories (e.g. s3, swift, or gcs -
 the bucket-based remotes).`, "|", "`")
```

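For example, to force the transaction-light, lower-memory listing method on a command that would otherwise use `ListR` (remote and path are placeholders):

```console
rclone ls remote:path --disable ListR
```
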
````diff
@@ -13,6 +13,26 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
 `--auth-key` is not provided then `serve s3` will allow anonymous
 access.

+Like all rclone flags `--auth-key` can be set via environment
+variables, in this case `RCLONE_AUTH_KEY`. Since this flag can be
+repeated, the input to `RCLONE_AUTH_KEY` is CSV encoded. Because the
+`accessKey,secretKey` pair has a comma in it, it needs to be in
+quotes.
+
+```console
+export RCLONE_AUTH_KEY='"user,pass"'
+rclone serve s3 ...
+```
+
+Or to supply multiple identities:
+
+```console
+export RCLONE_AUTH_KEY='"user1,pass1","user2,pass2"'
+rclone serve s3 ...
+```
+
+Setting this variable without quotes will produce an error.
+
 Please note that some clients may require HTTPS endpoints. See [the
 SSL docs](#tls-ssl) for more information.
````

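When a single identity is enough and the CSV quoting is unwanted, the flag form avoids the environment variable entirely. A sketch with placeholder credentials and remote:

```console
rclone serve s3 remote:bucket --auth-key accessKey,secretKey
```
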
```diff
@@ -70,6 +70,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
 		w.s3Secret = getAuthSecret(opt.AuthKey)
 	}

+	authList, err := authlistResolver(opt.AuthKey)
+	if err != nil {
+		return nil, fmt.Errorf("parsing auth list failed: %q", err)
+	}
+
 	var newLogger logger
 	w.faker = gofakes3.New(
 		newBackend(w),
```

```diff
@@ -77,7 +82,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
 		gofakes3.WithLogger(newLogger),
 		gofakes3.WithRequestID(rand.Uint64()),
 		gofakes3.WithoutVersioning(),
-		gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
+		gofakes3.WithV4Auth(authList),
 		gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
 	)
```

```diff
@@ -92,7 +97,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
 		w._vfs = vfs.New(f, vfsOpt)

 		if len(opt.AuthKey) > 0 {
-			w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
+			w.faker.AddAuthKeys(authList)
 		}
 	}
```

```diff
@@ -3,6 +3,7 @@ package s3
 import (
 	"context"
 	"encoding/hex"
+	"errors"
 	"io"
 	"os"
 	"path"
```

```diff
@@ -125,15 +126,14 @@ func rmdirRecursive(p string, VFS *vfs.VFS) {
 	}
 }

-func authlistResolver(list []string) map[string]string {
+func authlistResolver(list []string) (map[string]string, error) {
 	authList := make(map[string]string)
 	for _, v := range list {
 		parts := strings.Split(v, ",")
 		if len(parts) != 2 {
-			fs.Infof(nil, "Ignored: invalid auth pair %s", v)
-			continue
+			return nil, errors.New("invalid auth pair: expecting a single comma")
 		}
 		authList[parts[0]] = parts[1]
 	}
-	return authList
+	return authList, nil
 }
```

```diff
@@ -202,6 +202,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
 {{< provider name="Servercore Object Storage" home="https://servercore.com/services/object-storage/" config="/s3/#servercore" >}}
 {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
+{{< provider name="Shade" home="https://shade.inc" config="/shade/" >}}
 {{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
 {{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
 {{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}}
```

```diff
@@ -1055,3 +1055,8 @@ put them back in again. -->
 - Vladislav Tropnikov <vtr.name@gmail.com>
 - Leo <i@hardrain980.com>
 - Johannes Rothe <mail@johannes-rothe.de>
+- Tingsong Xu <tingsong.xu@rightcapital.com>
+- Jonas Tingeborn <134889+jojje@users.noreply.github.com>
+- jhasse-shade <jacob@shade.inc>
+- vyv03354 <VYV03354@nifty.ne.jp>
+- masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>
```

```diff
@@ -6,6 +6,22 @@ description: "Rclone Changelog"

 # Changelog

+## v1.72.1 - 2025-12-10
+
+[See commits](https://github.com/rclone/rclone/compare/v1.72.0...v1.72.1)
+
+- Bug Fixes
+    - build: update to go1.25.5 to fix [CVE-2025-61729](https://pkg.go.dev/vuln/GO-2025-4155)
+    - doc fixes (Duncan Smart, Nick Craig-Wood)
+    - configfile: Fix piped config support (Jonas Tingeborn)
+    - log
+        - Fix PID not included in JSON log output (Tingsong Xu)
+        - Fix backtrace not going to the --log-file (Nick Craig-Wood)
+    - Google Cloud Storage
+        - Improve endpoint parameter docs (Johannes Rothe)
+    - S3
+        - Add missing regions for Selectel provider (Nick Craig-Wood)
+
 ## v1.72.0 - 2025-11-21

 [See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.72.0)
```

```diff
@@ -336,7 +336,7 @@ full new copy of the file.
 When mounting with `--read-only`, attempts to write to files will fail *silently*
 as opposed to with a clear warning as in macFUSE.

-# Mounting on Linux
+## Mounting on Linux

 On newer versions of Ubuntu, you may encounter the following error when running
 `rclone mount`:
```

```diff
@@ -82,6 +82,7 @@ See the following for detailed instructions for
 - [rsync.net](/sftp/#rsync-net)
 - [Seafile](/seafile/)
 - [SFTP](/sftp/)
+- [Shade](/shade/)
 - [Sia](/sia/)
 - [SMB](/smb/)
 - [Storj](/storj/)
```

```diff
@@ -750,21 +751,21 @@ object also.
 Here is a table of standard system metadata which, if appropriate, a
 backend may implement.

-| key                 | description | example |
-|---------------------|-------------|---------|
+| key | description | example |
+| --- | ----------- | ------- |
 | mode | File type and mode: octal, unix style | 0100664 |
 | uid | User ID of owner: decimal number | 500 |
 | gid | Group ID of owner: decimal number | 500 |
 | rdev | Device ID (if special file) => hexadecimal | 0 |
 | atime | Time of last access: RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 |
 | mtime | Time of last modification: RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 |
 | btime | Time of file creation (birth): RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 |
 | utime | Time of file upload: RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 |
 | cache-control | Cache-Control header | no-cache |
 | content-disposition | Content-Disposition header | inline |
 | content-encoding | Content-Encoding header | gzip |
 | content-language | Content-Language header | en-US |
 | content-type | Content-Type header | text/plain |

 The metadata keys `mtime` and `content-type` will take precedence if
 supplied in the metadata over reading the `Content-Type` or
```

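These keys are read and written through rclone's metadata support, which is off by default and enabled with `--metadata`/`-M`. A sketch with placeholder remotes:

```console
rclone copy -M source:path dest:path
```
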
```diff
@@ -1190,7 +1191,8 @@ on any OS, and the value is defined as following:

 - On Windows: `%HOME%` if defined, else `%USERPROFILE%`, or else `%HOMEDRIVE%\%HOMEPATH%`.
 - On Unix: `$HOME` if defined, else by looking up current user in OS-specific user
-  database (e.g. passwd file), or else use the result from shell command `cd && pwd`.
+  database (e.g. passwd file), or else use the result from shell command
+  `cd && pwd`.

 If you run `rclone config file` you will see where the default location is for
 you. Running `rclone config touch` will ensure a configuration file exists,
```

```diff
@@ -3437,7 +3439,7 @@ many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/c
 string. For example

 | Environment variable | Equivalent options |
-|----------------------|--------------------|
+| -------------------- | ------------------ |
 | `RCLONE_EXCLUDE="*.jpg"` | `--exclude "*.jpg"` |
 | `RCLONE_EXCLUDE="*.jpg,*.png"` | `--exclude "*.jpg"` `--exclude "*.png"` |
 | `RCLONE_EXCLUDE='"*.jpg","*.png"'` | `--exclude "*.jpg"` `--exclude "*.png"` |
```

```diff
@@ -16,7 +16,7 @@ image](https://securebuild.com/images/rclone) through our partner
 ## Release {{% version %}} OS requirements {#osrequirements}

 | OS | Minimum Version |
-|:-------:|:-------:|
+| :---: | :---: |
 | Linux | Kernel 3.2 |
 | macOS | 12 (Monterey) |
 | Windows | 10, Server 2016 |
```

```diff
@@ -31,7 +31,7 @@ in the Go Wiki.
 ## Release {{% version %}} {#release}

 | Arch-OS | Windows | macOS | Linux | .deb | .rpm | FreeBSD | NetBSD | OpenBSD | Plan9 | Solaris |
-|:-------:|:-------:|:-----:|:-----:|:----:|:----:|:-------:|:------:|:-------:|:-----:|:-------:|
+| :-----: | :-----: | :---: | :---: | :--: | :--: | :-----: | :----: | :-----: | :---: | :-----: |
 | Intel/AMD - 64 Bit | {{< download windows amd64 >}} | {{< download osx amd64 >}} | {{< download linux amd64 >}} | {{< download linux amd64 deb >}} | {{< download linux amd64 rpm >}} | {{< download freebsd amd64 >}} | {{< download netbsd amd64 >}} | {{< download openbsd amd64 >}} | {{< download plan9 amd64 >}} | {{< download solaris amd64 >}} |
 | Intel/AMD - 32 Bit | {{< download windows 386 >}} | - | {{< download linux 386 >}} | {{< download linux 386 deb >}} | {{< download linux 386 rpm >}} | {{< download freebsd 386 >}} | {{< download netbsd 386 >}} | {{< download openbsd 386 >}} | {{< download plan9 386 >}} | - |
 | ARMv5 - 32 Bit NOHF | - | - | {{< download linux arm >}} | {{< download linux arm deb >}} | {{< download linux arm rpm >}} | {{< download freebsd arm >}} | {{< download netbsd arm >}} | - | - | - |
```

```diff
@@ -120,7 +120,7 @@ If you would like to download the current version (maybe from a
 script) from a URL which doesn't change then you can use these links.

 | Arch-OS | Windows | macOS | Linux | .deb | .rpm | FreeBSD | NetBSD | OpenBSD | Plan9 | Solaris |
-|:-------:|:-------:|:-----:|:-----:|:----:|:----:|:-------:|:------:|:-------:|:-----:|:-------:|
+| :-----: | :-----: | :---: | :---: | :--: | :--: | :-----: | :----: | :-----: | :---: | :-----: |
 | Intel/AMD - 64 Bit | {{< cdownload windows amd64 >}} | {{< cdownload osx amd64 >}} | {{< cdownload linux amd64 >}} | {{< cdownload linux amd64 deb >}} | {{< cdownload linux amd64 rpm >}} | {{< cdownload freebsd amd64 >}} | {{< cdownload netbsd amd64 >}} | {{< cdownload openbsd amd64 >}} | {{< cdownload plan9 amd64 >}} | {{< cdownload solaris amd64 >}} |
 | Intel/AMD - 32 Bit | {{< cdownload windows 386 >}} | - | {{< cdownload linux 386 >}} | {{< cdownload linux 386 deb >}} | {{< cdownload linux 386 rpm >}} | {{< cdownload freebsd 386 >}} | {{< cdownload netbsd 386 >}} | {{< cdownload openbsd 386 >}} | {{< cdownload plan9 386 >}} | - |
 | ARMv5 - 32 Bit NOHF | - | - | {{< cdownload linux arm >}} | {{< cdownload linux arm deb >}} | {{< cdownload linux arm rpm >}} | {{< cdownload freebsd arm >}} | {{< cdownload netbsd arm >}} | - | - | - |
```

```diff
@@ -137,7 +137,7 @@ Older downloads can be found at <https://downloads.rclone.org/>
 The latest `rclone` version working for:

 | OS | Maximum rclone version |
-|:-------:|:-------:|
+| :---: | :---: |
 | Windows 7 | v1.63.1 |
 | Windows Server 2008 | v1.63.1 |
 | Windows Server 2012 | v1.63.1 |
```

```diff
@@ -202,28 +202,28 @@ them into regular expressions.

 ## Filter pattern examples {#examples}

-| Description | Pattern | Matches | Does not match |
-| ----------- |-------- | ------- | -------------- |
+| Description | Pattern | Matches | Does not match |
+| ----------- | ---------------- | ------------------------------- | ------------------ |
 | Wildcard | `*.jpg` | `/file.jpg` | `/file.png` |
 | | | `/dir/file.jpg` | `/dir/file.png` |
 | Rooted | `/*.jpg` | `/file.jpg` | `/file.png` |
 | | | `/file2.jpg` | `/dir/file.jpg` |
 | Alternates | `*.{jpg,png}` | `/file.jpg` | `/file.gif` |
 | | | `/dir/file.png` | `/dir/file.gif` |
 | Path Wildcard | `dir/**` | `/dir/anyfile` | `file.png` |
 | | | `/subdir/dir/subsubdir/anyfile` | `/subdir/file.png` |
 | Any Char | `*.t?t` | `/file.txt` | `/file.qxt` |
 | | | `/dir/file.tzt` | `/dir/file.png` |
 | Range | `*.[a-z]` | `/file.a` | `/file.0` |
 | | | `/dir/file.b` | `/dir/file.1` |
 | Escape | `*.\?\?\?` | `/file.???` | `/file.abc` |
 | | | `/dir/file.???` | `/dir/file.def` |
 | Class | `*.\d\d\d` | `/file.012` | `/file.abc` |
 | | | `/dir/file.345` | `/dir/file.def` |
 | Regexp | `*.{{jpe?g}}` | `/file.jpeg` | `/file.png` |
 | | | `/dir/file.jpg` | `/dir/file.jpeeg` |
 | Rooted Regexp | `/{{.*\.jpe?g}}` | `/file.jpeg` | `/file.png` |
 | | | `/file.jpg` | `/dir/file.jpg` |

 ## How filter rules are applied to files {#how-filter-rules-work}
```

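For instance, the Alternates pattern from the table above can be applied directly as a filter (remote name is a placeholder):

```console
rclone ls remote: --include "*.{jpg,png}"
```
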
````diff
@@ -285,8 +285,8 @@ rclone v1.49.1
 - go version: go1.12.9
 ```

-There are a few command line options to consider when starting an rclone Docker container
-from the rclone image.
+There are a few command line options to consider when starting an rclone Docker
+container from the rclone image.

 - You need to mount the host rclone config dir at `/config/rclone` into the Docker
   container. Due to the fact that rclone updates tokens inside its config file,
````

```diff
@@ -300,8 +300,8 @@ from the rclone image.
   data files reside on the host with a non-root UID:GID, you need to pass these
   on the container start command line.

-- If you want to access the RC interface (either via the API or the Web UI), it is
-  required to set the `--rc-addr` to `:5572` in order to connect to it from outside
+- If you want to access the RC interface (either via the API or the Web UI), it
+  is required to set the `--rc-addr` to `:5572` in order to connect to it from outside
   the container. An explanation about why this is necessary can be found in an old
   [pythonspeed.com](https://web.archive.org/web/20200808071950/https://pythonspeed.com/articles/docker-connection-refused/)
   article.
```

```diff
@@ -309,9 +309,9 @@ from the rclone image.
   probably set it to listen to localhost only, with `127.0.0.1:5572` as the
   value for `--rc-addr`

-- It is possible to use `rclone mount` inside a userspace Docker container, and expose
-  the resulting fuse mount to the host. The exact `docker run` options to do that
-  might vary slightly between hosts. See, e.g. the discussion in this
+- It is possible to use `rclone mount` inside a userspace Docker container, and
+  expose the resulting fuse mount to the host. The exact `docker run` options to
+  do that might vary slightly between hosts. See, e.g. the discussion in this
   [thread](https://github.com/moby/moby/issues/9448).

 You also need to mount the host `/etc/passwd` and `/etc/group` for fuse to work
```

```diff
@@ -542,8 +542,8 @@ To override them set the corresponding options (as command-line arguments, or as

 After installing and configuring rclone, as described above, you are ready to use
 rclone as an interactive command line utility. If your goal is to perform *periodic*
-operations, such as a regular [sync](https://rclone.org/commands/rclone_sync/), you
-will probably want to configure your rclone command in your operating system's
+operations, such as a regular [sync](https://rclone.org/commands/rclone_sync/),
+you will probably want to configure your rclone command in your operating system's
 scheduler. If you need to expose *service*-like features, such as
 [remote control](https://rclone.org/rc/), [GUI](https://rclone.org/gui/),
 [serve](https://rclone.org/commands/rclone_serve/) or [mount](https://rclone.org/commands/rclone_mount/),
```

```diff
@@ -583,9 +583,9 @@ c:\rclone\rclone.exe sync c:\files remote:/files --no-console --log-file c:\rclo

 As mentioned in the [mount](https://rclone.org/commands/rclone_mount/) documentation,
 mounted drives created as Administrator are not visible to other accounts, not even
-the account that was elevated as Administrator. By running the mount command as the
-built-in `SYSTEM` user account, it will create drives accessible for everyone on
-the system. Both scheduled task and Windows service can be used to achieve this.
+the account that was elevated as Administrator. By running the mount command as
+the built-in `SYSTEM` user account, it will create drives accessible for everyone
+on the system. Both scheduled task and Windows service can be used to achieve this.

 NOTE: Remember that when rclone runs as the `SYSTEM` user, the user profile
 that it sees will not be yours. This means that if you normally run rclone with
```

@@ -615,8 +615,8 @@ will often give you better results.
|
||||
|
||||
#### Start from Task Scheduler
|
||||
|
||||
Task Scheduler is an administrative tool built into Windows, and it can be used to
|
||||
configure rclone to be started automatically in a highly configurable way, e.g.
|
||||
Task Scheduler is an administrative tool built into Windows, and it can be used
|
||||
to configure rclone to be started automatically in a highly configurable way, e.g.
|
||||
periodically on a schedule, on user log on, or at system startup. It can run
|
||||
be configured to run as the current user, or for a mount command that needs to
|
||||
be available to all users it can run as the `SYSTEM` user.
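
For illustration, a one-line sketch using the built-in `schtasks` tool (the task
name, trigger, and paths are assumptions; run it from an elevated prompt):

```sh
schtasks /Create /TN "rclone mount" /SC ONSTART /RU SYSTEM /TR "c:\rclone\rclone.exe mount remote: X: --config c:\rclone\rclone.conf"
```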

@@ -656,18 +656,18 @@ To Windows service running any rclone command, the excellent third-party utility
[NSSM](http://nssm.cc), the "Non-Sucking Service Manager", can be used.
It includes some advanced features such as adjusting process priority, defining
process environment variables, redirect to file anything written to stdout, and
customized response to different exit codes, with a GUI to configure everything from
(although it can also be used from command line ).
customized response to different exit codes, with a GUI to configure everything
(although it can also be used from the command line).
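
A minimal sketch of registering such a service with NSSM (the service name, paths,
and drive letter are assumptions to adapt):

```sh
nssm install rclone-mount "c:\rclone\rclone.exe" "mount remote: X: --config c:\rclone\rclone.conf"
nssm set rclone-mount AppStdout c:\rclone\rclone-mount.log
nssm start rclone-mount
```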

There are also several other alternatives. To mention one more,
[WinSW](https://github.com/winsw/winsw), "Windows Service Wrapper", is worth checking
out. It requires .NET Framework, but it is preinstalled on newer versions of Windows,
and it also provides alternative standalone distributions which include the necessary
runtime (.NET 5). WinSW is a command-line only utility, where you have to manually
create an XML file with service configuration. This may be a drawback for some, but
it can also be an advantage as it is easy to back up and reuse the configuration
settings, without having go through manual steps in a GUI. One thing to note is that
by default it does not restart the service on error, one have to explicit enable
create an XML file with service configuration. This may be a drawback for some,
but it can also be an advantage as it is easy to back up and reuse the configuration
settings, without having to go through manual steps in a GUI. One thing to note is
that by default it does not restart the service on error; one has to explicitly enable
this in the configuration file (via the "onfailure" parameter).

### Autostart on Linux
@@ -676,8 +676,8 @@ this in the configuration file (via the "onfailure" parameter).

To always run rclone in background, relevant for mount commands etc,
you can use systemd to set up rclone as a system or user service. Running as a
system service ensures that it is run at startup even if the user it is running as
has no active session. Running rclone as a user service ensures that it only
system service ensures that it is run at startup even if the user it is running
as has no active session. Running rclone as a user service ensures that it only
starts after the configured user has logged into the system.
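
As a sketch of the user-service variant (the unit name, mount point, and flags
are assumptions; a system service is similar but lives under `/etc/systemd/system`):

```sh
# Create a user unit that mounts remote: under ~/mnt/remote (%h = home directory).
mkdir -p ~/.config/systemd/user ~/mnt/remote
cat > ~/.config/systemd/user/rclone-mount.service <<'EOF'
[Unit]
Description=rclone mount of remote:

[Service]
ExecStart=/usr/bin/rclone mount remote: %h/mnt/remote --vfs-cache-mode writes
ExecStop=/bin/fusermount -u %h/mnt/remote
Restart=on-failure

[Install]
WantedBy=default.target
EOF
systemctl --user daemon-reload
systemctl --user enable --now rclone-mount.service
```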

#### Run periodically from cron
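
A sketch of what such a crontab entry might look like (the schedule, paths, and
log file are assumptions to adapt):

```sh
# Edit the current user's crontab with `crontab -e`, then e.g.
# run a sync every 30 minutes, logging to a file:
*/30 * * * * /usr/bin/rclone sync /home/user/files remote:files --log-file /home/user/rclone-cron.log
```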
@@ -14,61 +14,62 @@ show through.

Here is an overview of the major features of each cloud storage system.

| Name | Hash | ModTime | Case Insensitive | Duplicate Files | MIME Type | Metadata |
| ---------------------------- |:-----------------:|:-------:|:----------------:|:---------------:|:---------:|:--------:|
| 1Fichier | Whirlpool | - | No | Yes | R | - |
| Akamai Netstorage | MD5, SHA256 | R/W | No | No | R | - |
| Amazon S3 (or S3 compatible) | MD5 | R/W | No | No | R/W | RWU |
| Backblaze B2 | SHA1 | R/W | No | No | R/W | - |
| Box | SHA1 | R/W | Yes | No | - | - |
| Citrix ShareFile | MD5 | R/W | Yes | No | - | - |
| Cloudinary | MD5 | R | No | Yes | - | - |
| Dropbox | DBHASH ¹ | R | Yes | No | - | - |
| Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
| FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - |
| Files.com | MD5, CRC32 | DR/W | Yes | No | R | - |
| FTP | - | R/W ¹⁰ | No | No | - | - |
| Gofile | MD5 | DR/W | No | Yes | R | - |
| Google Cloud Storage | MD5 | R/W | No | No | R/W | - |
| Google Drive | MD5, SHA1, SHA256 | DR/W | No | Yes | R/W | DRWU |
| Google Photos | - | - | No | Yes | R | - |
| HDFS | - | R/W | No | No | - | - |
| HiDrive | HiDrive ¹² | R/W | No | No | - | - |
| HTTP | - | R | No | No | R | R |
| iCloud Drive | - | R | No | No | - | - |
| Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No | No | - | RWU |
| Jottacloud | MD5 | R/W | Yes | No | R | RW |
| Koofr | MD5 | - | Yes | No | - | - |
| Linkbox | - | R | No | No | - | - |
| Mail.ru Cloud | Mailru ⁶ | R/W | Yes | No | - | - |
| Mega | - | - | No | Yes | - | - |
| Memory | MD5 | R/W | No | No | - | - |
| Microsoft Azure Blob Storage | MD5 | R/W | No | No | R/W | - |
| Microsoft Azure Files Storage | MD5 | R/W | Yes | No | R/W | - |
| Microsoft OneDrive | QuickXorHash ⁵ | DR/W | Yes | No | R | DRW |
| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
| Oracle Object Storage | MD5 | R/W | No | No | R/W | RU |
| pCloud | MD5, SHA1 ⁷ | R/W | No | No | W | - |
| PikPak | MD5 | R | No | No | R | - |
| Pixeldrain | SHA256 | R/W | No | No | R | RW |
| premiumize.me | - | - | Yes | No | R | - |
| put.io | CRC-32 | R/W | No | Yes | R | - |
| Proton Drive | SHA1 | R/W | No | No | R | - |
| QingStor | MD5 | - ⁹ | No | No | R/W | - |
| Quatrix by Maytech | - | R/W | No | No | - | - |
| Seafile | - | - | No | No | - | - |
| SFTP | MD5, SHA1 ² | DR/W | Depends | No | - | - |
| Sia | - | - | No | No | - | - |
| SMB | - | R/W | Yes | No | - | - |
| SugarSync | - | - | No | No | - | - |
| Storj | - | R | No | No | - | - |
| Uloz.to | MD5, SHA256 ¹³ | - | No | Yes | - | - |
| Uptobox | - | - | No | Yes | - | - |
| WebDAV | MD5, SHA1 ³ | R ⁴ | Depends | No | - | - |
| Yandex Disk | MD5 | R/W | No | No | R | - |
| Zoho WorkDrive | - | - | No | No | - | - |
| The local filesystem | All | DR/W | Depends | No | - | DRWU |
| Name | Hash | ModTime | Case Insensitive | Duplicate Files | MIME Type | Metadata |
| ----------------------------- | :---------------: | :-----: | :--------------: | :-------------: | :-------: | :------: |
| 1Fichier | Whirlpool | - | No | Yes | R | - |
| Akamai Netstorage | MD5, SHA256 | R/W | No | No | R | - |
| Amazon S3 (or S3 compatible) | MD5 | R/W | No | No | R/W | RWU |
| Backblaze B2 | SHA1 | R/W | No | No | R/W | - |
| Box | SHA1 | R/W | Yes | No | - | - |
| Citrix ShareFile | MD5 | R/W | Yes | No | - | - |
| Cloudinary | MD5 | R | No | Yes | - | - |
| Dropbox | DBHASH ¹ | R | Yes | No | - | - |
| Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
| FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - |
| Files.com | MD5, CRC32 | DR/W | Yes | No | R | - |
| FTP | - | R/W ¹⁰ | No | No | - | - |
| Gofile | MD5 | DR/W | No | Yes | R | - |
| Google Cloud Storage | MD5 | R/W | No | No | R/W | - |
| Google Drive | MD5, SHA1, SHA256 | DR/W | No | Yes | R/W | DRWU |
| Google Photos | - | - | No | Yes | R | - |
| HDFS | - | R/W | No | No | - | - |
| HiDrive | HiDrive ¹² | R/W | No | No | - | - |
| HTTP | - | R | No | No | R | R |
| iCloud Drive | - | R | No | No | - | - |
| Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No | No | - | RWU |
| Jottacloud | MD5 | R/W | Yes | No | R | RW |
| Koofr | MD5 | - | Yes | No | - | - |
| Linkbox | - | R | No | No | - | - |
| Mail.ru Cloud | Mailru ⁶ | R/W | Yes | No | - | - |
| Mega | - | - | No | Yes | - | - |
| Memory | MD5 | R/W | No | No | - | - |
| Microsoft Azure Blob Storage | MD5 | R/W | No | No | R/W | - |
| Microsoft Azure Files Storage | MD5 | R/W | Yes | No | R/W | - |
| Microsoft OneDrive | QuickXorHash ⁵ | DR/W | Yes | No | R | DRW |
| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
| Oracle Object Storage | MD5 | R/W | No | No | R/W | RU |
| pCloud | MD5, SHA1 ⁷ | R/W | No | No | W | - |
| PikPak | MD5 | R | No | No | R | - |
| Pixeldrain | SHA256 | R/W | No | No | R | RW |
| premiumize.me | - | - | Yes | No | R | - |
| put.io | CRC-32 | R/W | No | Yes | R | - |
| Proton Drive | SHA1 | R/W | No | No | R | - |
| QingStor | MD5 | - ⁹ | No | No | R/W | - |
| Quatrix by Maytech | - | R/W | No | No | - | - |
| Seafile | - | - | No | No | - | - |
| SFTP | MD5, SHA1 ² | DR/W | Depends | No | - | - |
| Shade | - | - | Yes | No | - | - |
| Sia | - | - | No | No | - | - |
| SMB | - | R/W | Yes | No | - | - |
| SugarSync | - | - | No | No | - | - |
| Storj | - | R | No | No | - | - |
| Uloz.to | MD5, SHA256 ¹³ | - | No | Yes | - | - |
| Uptobox | - | - | No | Yes | - | - |
| WebDAV | MD5, SHA1 ³ | R ⁴ | Depends | No | - | - |
| Yandex Disk | MD5 | R/W | No | No | R | - |
| Zoho WorkDrive | - | - | No | No | - | - |
| The local filesystem | All | DR/W | Depends | No | - | DRWU |

¹ Dropbox supports [its own custom
hash](https://www.dropbox.com/developers/reference/content-hash).
@@ -135,7 +136,7 @@ size by default, though can be configured to check the file hash
change the timestamp of an existing file without having to re-upload it.

| Key | Explanation |
|-----|-------------|
| --- | ----------- |
| `-` | ModTimes not supported - times likely the upload time |
| `R` | ModTimes supported on files but can't be changed without re-upload |
| `R/W` | Read and Write ModTimes fully supported on files |
@@ -282,8 +283,8 @@ will be escaped with the `‛` character to avoid ambiguous file names.
Each cloud storage backend can use a different set of characters,
which will be specified in the documentation for each backend.

| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| Character | Value | Replacement |
| --------- | :---: | :---------: |
| NUL | 0x00 | ␀ |
| SOH | 0x01 | ␁ |
| STX | 0x02 | ␂ |
@@ -323,9 +324,9 @@ The default encoding will also encode these file names as they are
problematic with many cloud storage systems.

| File name | Replacement |
| --------- |:-----------:|
| --------- | :---------: |
| . | ． |
| .. | ．． |

#### Invalid UTF-8 bytes {#invalid-utf8}

@@ -365,8 +366,8 @@ list of all possible values by passing an invalid value to this
flag, e.g. `--local-encoding "help"`. The command `rclone help flags encoding`
will show you the defaults for the backends.
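
For example (the flag is taken from the text above; the source and destination
paths are placeholders):

```sh
# Any invalid value, such as "help", makes rclone print the possible encodings.
rclone copy /tmp/src remote:dst --local-encoding "help"
# Show the default encoding of every backend.
rclone help flags encoding
```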

| Encoding | Characters | Encoded as |
| --------- | ---------- | ---------- |
| Encoding | Characters | Encoded as |
| -------- | ---------- | ---------- |
| Asterisk | `*` | `＊` |
| BackQuote | `` ` `` | `` ｀ `` |
| BackSlash | `\` | `＼` |
@@ -491,12 +492,12 @@ that backend) and/or user metadata (general purpose metadata).
The levels of metadata support are

| Key | Explanation |
|-----|-------------|
| `R` | Read only System Metadata on files only|
| `RW` | Read and write System Metadata on files only|
| `RWU` | Read and write System Metadata and read and write User Metadata on files only|
| --- | ----------- |
| `R` | Read only System Metadata on files only |
| `RW` | Read and write System Metadata on files only |
| `RWU` | Read and write System Metadata and read and write User Metadata on files only |
| `DR` | Read only System Metadata on files and directories |
| `DRW` | Read and write System Metadata on files and directories|
| `DRW` | Read and write System Metadata on files and directories |
| `DRWU` | Read and write System Metadata and read and write User Metadata on files and directories |

See [the metadata docs](/docs/#metadata) for more info.
@@ -506,60 +507,60 @@ See [the metadata docs](/docs/#metadata) for more info.
All rclone remotes support a base command set. Other features depend
upon backend-specific capabilities.

| Name | Purge | Copy | Move | DirMove | CleanUp | ListR | StreamUpload | MultithreadUpload | LinkSharing | About | EmptyDir |
| ---------------------------- |:-----:|:----:|:----:|:-------:|:-------:|:-----:|:------------:|:------------------|:------------:|:-----:|:--------:|
| 1Fichier | No | Yes | Yes | No | No | No | No | No | Yes | No | Yes |
| Akamai Netstorage | Yes | No | No | No | No | Yes | Yes | No | No | No | Yes |
| Amazon S3 (or S3 compatible) | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No |
| Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
| Files.com | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
| FTP | No | No | Yes | Yes | No | No | Yes | No | No | No | Yes |
| Gofile | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Google Cloud Storage | Yes | Yes | No | No | No | No | Yes | No | No | No | No |
| Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
| Google Photos | No | No | No | No | No | No | No | No | No | No | No |
| HDFS | Yes | No | Yes | Yes | No | No | Yes | No | No | Yes | Yes |
| HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes |
| HTTP | No | No | No | No | No | No | No | No | No | No | Yes |
| iCloud Drive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| ImageKit | Yes | No | Yes | No | No | No | No | No | No | No | Yes |
| Internet Archive | No | Yes | No | No | Yes | Yes | No | No | Yes | Yes | No |
| Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| Koofr | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Mail.ru Cloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Mega | Yes | No | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Memory | No | Yes | No | No | No | Yes | Yes | No | No | No | No |
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | Yes | Yes | No | No | No |
| Microsoft Azure Files Storage | No | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
| Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | Yes ⁵ | No | No | Yes | Yes | Yes |
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes |
| put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | No | Yes | Yes |
| Proton Drive | Yes | No | Yes | Yes | Yes | No | No | No | No | Yes | Yes |
| QingStor | No | Yes | No | No | Yes | Yes | No | No | No | No | No |
| Quatrix by Maytech | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| Seafile | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
| SFTP | No | Yes ⁴| Yes | Yes | No | No | Yes | No | No | Yes | Yes |
| Sia | No | No | No | No | No | No | Yes | No | No | No | Yes |
| SMB | No | No | Yes | Yes | No | No | Yes | Yes | No | No | Yes |
| SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
| Storj | Yes ² | Yes | Yes | No | No | Yes | Yes | No | Yes | No | No |
| Uloz.to | No | No | Yes | Yes | No | No | No | No | No | No | Yes |
| Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No | No |
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ³ | No | No | Yes | Yes |
| Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
| Zoho WorkDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| The local filesystem | No | No | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
| Name | Purge | Copy | Move | DirMove | CleanUp | ListR | StreamUpload | MultithreadUpload | LinkSharing | About | EmptyDir |
| ----------------------------- | :---: | :--: | :--: | :-----: | :-----: | :---: | :----------: | :---------------: | :---------: | :---: | :------: |
| 1Fichier | No | Yes | Yes | No | No | No | No | No | Yes | No | Yes |
| Akamai Netstorage | Yes | No | No | No | No | Yes | Yes | No | No | No | Yes |
| Amazon S3 (or S3 compatible) | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No |
| Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
| Files.com | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
| FTP | No | No | Yes | Yes | No | No | Yes | No | No | No | Yes |
| Gofile | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Google Cloud Storage | Yes | Yes | No | No | No | No | Yes | No | No | No | No |
| Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
| Google Photos | No | No | No | No | No | No | No | No | No | No | No |
| HDFS | Yes | No | Yes | Yes | No | No | Yes | No | No | Yes | Yes |
| HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes |
| HTTP | No | No | No | No | No | No | No | No | No | No | Yes |
| iCloud Drive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| ImageKit | Yes | No | Yes | No | No | No | No | No | No | No | Yes |
| Internet Archive | No | Yes | No | No | Yes | Yes | No | No | Yes | Yes | No |
| Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| Koofr | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Mail.ru Cloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Mega | Yes | No | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Memory | No | Yes | No | No | No | Yes | Yes | No | No | No | No |
| Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | Yes | Yes | No | No | No |
| Microsoft Azure Files Storage | No | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
| Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | Yes ⁵ | No | No | Yes | Yes | Yes |
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes |
| put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | No | Yes | Yes |
| Proton Drive | Yes | No | Yes | Yes | Yes | No | No | No | No | Yes | Yes |
| QingStor | No | Yes | No | No | Yes | Yes | No | No | No | No | No |
| Quatrix by Maytech | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| Seafile | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |
| SFTP | No | Yes ⁴ | Yes | Yes | No | No | Yes | No | No | Yes | Yes |
| Sia | No | No | No | No | No | No | Yes | No | No | No | Yes |
| SMB | No | No | Yes | Yes | No | No | Yes | Yes | No | No | Yes |
| SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
| Storj | Yes ² | Yes | Yes | No | No | Yes | Yes | No | Yes | No | No |
| Uloz.to | No | No | Yes | Yes | No | No | No | No | No | No | Yes |
| Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No | No |
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ³ | No | No | Yes | Yes |
| Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
| Zoho WorkDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| The local filesystem | No | No | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |

¹ Note Swift implements this in order to delete directory markers but
it doesn't actually have a quicker way of deleting files other than

@@ -173,6 +173,31 @@ So if the folder you want rclone to use your is "My Music/", then use the return
id from ```rclone lsf``` command (ex. `dxxxxxxxx2`) as the `root_folder_id` variable
value in the config file.
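
For instance, a quick sketch of listing directory IDs so you can pick the right
one (`--format "ip"` prints the ID and path columns of `rclone lsf`):

```sh
rclone lsf remote: --dirs-only --format "ip" --csv
```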

### Change notifications and mounts

The pCloud backend supports real‑time updates for rclone mounts via change
notifications. rclone uses pCloud’s diff long‑polling API to detect changes and
will automatically refresh directory listings in the mounted filesystem when
changes occur.

Notes and behavior:

- Works automatically when using `rclone mount` and requires no additional
  configuration.
- Notifications are directory‑scoped: when rclone detects a change, it refreshes
  the affected directory so new/removed/renamed files become visible promptly.
- Updates are near real‑time. The backend uses a long‑poll with short fallback
  polling intervals, so you should see changes appear quickly without manual
  refreshes.

If you want to debug or verify notifications, you can use the helper command:

```bash
rclone test changenotify remote:
```

This will log incoming change notifications for the given remote.

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/pcloud/pcloud.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

218
docs/content/shade.md
Normal file
@@ -0,0 +1,218 @@

# {{< icon "fa fa-moon" >}} Shade

This is a backend for the [Shade](https://shade.inc/) platform.

## About Shade

[Shade](https://shade.inc/) is an AI-powered cloud NAS that makes your cloud files behave like a local drive, optimized for media and creative workflows. It provides fast, secure access with natural-language search, easy sharing, and scalable cloud storage.

## Accounts & Pricing

To use this backend, you need to [create a free account](https://app.shade.inc/) on Shade. A free account includes 20GB of storage.

## Usage

Paths are specified as `remote:path`.

Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
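
Once configured as shown in the walkthrough below (where the remote is named
`Shade`), normal rclone commands apply; for example (local path is a placeholder):

```sh
rclone ls Shade:
rclone copy /home/user/media Shade:media
```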

## Configuration

Here is an example of making a Shade configuration.

First, [create a free account](https://app.shade.inc/) and choose a plan.

You will need to log in and get the `API Key` and `Drive ID` for your account: the API key from your account settings and the drive ID from the settings of the drive you created.

Now run

`rclone config`

Follow this interactive process:

```sh
$ rclone config
e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> n

Enter name for new remote.
name> Shade

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[OTHER OPTIONS]
xx / Shade FS
   \ (shade)
[OTHER OPTIONS]
Storage> xx

Option drive_id.
The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.
Enter a value.
drive_id> [YOUR_ID]

Option api_key.
An API key for your account.
Enter a value.
api_key> [YOUR_API_KEY]

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: shade
- drive_id: [YOUR_ID]
- api_key: [YOUR_API_KEY]
Keep this "Shade" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

### Modification times and hashes

Shade does not support hashes, and modification times cannot be written.

### Transfers

Shade uses multipart uploads by default. This means that files will be chunked and sent up to Shade concurrently. To configure how many simultaneous uploads you want, update the 'concurrency' option in the advanced config section. Note that higher concurrency uses more memory and initiates more HTTP requests.

### Deleting files

Please note that when deleting files in Shade via rclone, the file is deleted instantly instead of being sent to the trash. This means that it will not be recoverable.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/shade/shade.go then run make backenddocs" >}}
### Standard options

Here are the Standard options specific to shade (Shade FS).

#### --shade-drive-id

The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.

Properties:

- Config: drive_id
- Env Var: RCLONE_SHADE_DRIVE_ID
- Type: string
- Required: true

#### --shade-api-key

An API key for your account. You can find this under Settings > API Keys.

Properties:

- Config: api_key
- Env Var: RCLONE_SHADE_API_KEY
- Type: string
- Required: true

### Advanced options

Here are the Advanced options specific to shade (Shade FS).

#### --shade-endpoint

Endpoint for the service.

Leave blank normally.

Properties:

- Config: endpoint
- Env Var: RCLONE_SHADE_ENDPOINT
- Type: string
- Required: false

#### --shade-chunk-size

Chunk size to use for uploading.

Any files larger than this will be uploaded in chunks of this size.

Note that this is stored in memory per transfer, so increasing it will
increase memory usage.

Minimum is 5MB, maximum is 5GB.

Properties:

- Config: chunk_size
- Env Var: RCLONE_SHADE_CHUNK_SIZE
- Type: SizeSuffix
- Default: 64Mi

#### --shade-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_SHADE_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot

#### --shade-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_SHADE_DESCRIPTION
- Type: string
- Required: false

{{< rem autogenerated options stop >}}

## Limitations

Note that Shade is case insensitive, so you can't have a file called
"Hello.doc" and one called "hello.doc".

Shade only supports filenames up to 255 characters in length.

`rclone about` is not supported by the Shade backend. Backends without
this capability cannot determine free space for an rclone mount or
use policy `mfs` (most free space) as a member of an rclone union
remote.

See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/).

## Backend commands

Here are the commands specific to the shade backend.

Run them with

    rclone backend COMMAND remote:

The help below will explain what arguments each command takes.

See the [backend](/commands/rclone_backend/) command for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).

@@ -107,6 +107,7 @@
<a class="dropdown-item" href="/seafile/"><i class="fa fa-server fa-fw"></i> Seafile</a>
<a class="dropdown-item" href="/sftp/"><i class="fa fa-server fa-fw"></i> SFTP</a>
<a class="dropdown-item" href="/sia/"><i class="fa fa-globe fa-fw"></i> Sia</a>
<a class="dropdown-item" href="/shade/"><i class="fa fa-moon fa-fw"></i> Shade</a>
<a class="dropdown-item" href="/smb/"><i class="fa fa-server fa-fw"></i> SMB / CIFS</a>
<a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
<a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>

@@ -372,7 +372,7 @@ func (p *pipedInput) Read(b []byte) (int, error) {
	return p.Reader.Read(b)
}

func (_ *pipedInput) Seek(_ int64, _ int) (int64, error) {
func (*pipedInput) Seek(int64, int) (int64, error) {
	return 0, fmt.Errorf("Seek not supported")
}

@@ -209,7 +209,7 @@ func InitLogging() {
	// Log file output
	if Opt.File != "" {
		var w io.Writer
		if Opt.MaxSize == 0 {
		if Opt.MaxSize < 0 {
			// No log rotation - just open the file as normal
			// We'll capture tracebacks like this too.
			f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)

@@ -1301,6 +1301,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
	err := Sync(ctx, r.Fremote, r.Flocal, false)
	assert.Equal(t, fs.ErrorNotDeleting, err)
	testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
	accounting.GlobalStats().ResetCounters()

	r.CheckLocalListing(
		t,

@@ -13,6 +13,7 @@ import (

	_ "github.com/rclone/rclone/backend/all"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"

@@ -507,6 +508,7 @@ func TestError(t *testing.T) {
	err = Sync(ctx, r.Fremote, r.Flocal, true)
	// testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
	assert.Error(t, err)
	accounting.GlobalStats().ResetCounters()

	r.CheckLocalListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
	r.CheckRemoteListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})

@@ -662,6 +662,10 @@ backends:
    ignoretests:
      - cmd/bisync
      - cmd/gitannex
  - backend: "shade"
    remote: "TestShade:"
    fastlist: false

  - backend: "archive"
    remote: "TestArchive:"
    fastlist: false

@@ -5,7 +5,6 @@ import (
	"runtime"
	"testing"

	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -111,9 +110,6 @@ func TestWriteFileDup(t *testing.T) {

	var dupFd uintptr
	dupFd, err = writeTestDup(fh.Fd())
	if err == vfs.ENOSYS {
		t.Skip("dup not supported on this platform")
	}
	require.NoError(t, err)

	dupFile := os.NewFile(dupFd, fh.Name())

@@ -1,4 +1,4 @@
//go:build !linux && !darwin && !freebsd && !openbsd && !windows
//go:build !linux && !darwin && !freebsd && !windows

package vfstest

@@ -1,4 +1,4 @@
//go:build linux || darwin || freebsd || openbsd
//go:build linux || darwin || freebsd

package vfstest