Mirror of https://github.com/rclone/rclone.git, synced 2025-12-19 17:53:16 +00:00

Compare commits: fix-1727-o...dump-curl (22 commits)
| Author | SHA1 | Date |
|---|---|---|
| | b141a553be | |
| | f81cd7d279 | |
| | 1a0a4628d7 | |
| | c10a4d465c | |
| | 3a6e07a613 | |
| | c36f99d343 | |
| | 3e21a7261b | |
| | fd439fab62 | |
| | 976aa6b416 | |
| | b3a0383ca3 | |
| | c13f129339 | |
| | 748d8c8957 | |
| | 4d379efcbb | |
| | e5e6a4b5ae | |
| | df18e8c55b | |
| | f4e17d8b0b | |
| | e5c69511bc | |
| | 175d4bc553 | |
| | 4851f1796c | |
| | 4ff8899b2c | |
| | 8f29a0b0a1 | |
| | 8b0e76e53b | |
.github/workflows/build.yml (vendored, 2 changed lines)
@@ -229,7 +229,7 @@ jobs:
       cache: false

     - name: Cache
-      uses: actions/cache@v4
+      uses: actions/cache@v5
       with:
         path: |
           ~/go/pkg/mod
@@ -129,7 +129,7 @@ jobs:

     - name: Load Go Build Cache for Docker
       id: go-cache
-      uses: actions/cache@v4
+      uses: actions/cache@v5
      with:
        key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
        restore-keys: |
@@ -183,7 +183,7 @@ jobs:
          touch "/tmp/digests/${digest#sha256:}"

     - name: Upload Image Digest
-      uses: actions/upload-artifact@v5
+      uses: actions/upload-artifact@v6
      with:
        name: digests-${{ env.PLATFORM }}
        path: /tmp/digests/*
@@ -198,7 +198,7 @@ jobs:

     steps:
     - name: Download Image Digests
-      uses: actions/download-artifact@v6
+      uses: actions/download-artifact@v7
      with:
        path: /tmp/digests
        pattern: digests-*
@@ -109,6 +109,7 @@ directories to and from different cloud storage providers.
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
 - Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
+- Shade [:page_facing_up:](https://rclone.org/shade/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 - Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
 - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
@@ -55,6 +55,7 @@ import (
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/shade"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sia"
 	_ "github.com/rclone/rclone/backend/smb"
@@ -6,6 +6,7 @@ import (
 	"context"
 	"crypto/md5"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"io"
 	"path"
@@ -25,6 +26,7 @@ var (
 	hashType = hash.MD5
 	// the object storage is persistent
 	buckets = newBucketsInfo()
+	errWriteOnly = errors.New("can't read when using --memory-discard")
 )

 // Register with Fs
@@ -33,12 +35,32 @@ func init() {
 		Name:        "memory",
 		Description: "In memory object storage system.",
 		NewFs:       NewFs,
-		Options:     []fs.Option{},
+		Options: []fs.Option{{
+			Name:     "discard",
+			Default:  false,
+			Advanced: true,
+			Help: `If set all writes will be discarded and reads will return an error
+
+If set then when files are uploaded the contents will not be saved. The
+files will appear to have been uploaded but will give an error on
+read. Files will have their MD5 sum calculated on upload which takes
+very little CPU time and allows the transfers to be checked.
+
+This can be useful for testing performance.
+
+Probably most easily used by using the connection string syntax:
+
+    :memory,discard:bucket
+`,
+		}},
 	})
 }

 // Options defines the configuration for this backend
-type Options struct{}
+type Options struct {
+	Discard bool `config:"discard"`
+}

 // Fs represents a remote memory server
 type Fs struct {
@@ -164,6 +186,7 @@ type objectData struct {
 	hash     string
 	mimeType string
 	data     []byte
+	size     int64
 }

 // Object describes a memory object
@@ -558,7 +581,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hashType {
 		return "", hash.ErrUnsupported
 	}
-	if o.od.hash == "" {
+	if o.od.hash == "" && !o.fs.opt.Discard {
 		sum := md5.Sum(o.od.data)
 		o.od.hash = hex.EncodeToString(sum[:])
 	}
@@ -567,7 +590,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	return int64(len(o.od.data))
+	return o.od.size
 }

 // ModTime returns the modification time of the object
@@ -593,6 +616,9 @@ func (o *Object) Storable() bool {

 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	if o.fs.opt.Discard {
+		return nil, errWriteOnly
+	}
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -624,13 +650,24 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	bucket, bucketPath := o.split()
-	data, err := io.ReadAll(in)
+	var data []byte
+	var size int64
+	var hash string
+	if o.fs.opt.Discard {
+		h := md5.New()
+		size, err = io.Copy(h, in)
+		hash = hex.EncodeToString(h.Sum(nil))
+	} else {
+		data, err = io.ReadAll(in)
+		size = int64(len(data))
+	}
 	if err != nil {
 		return fmt.Errorf("failed to update memory object: %w", err)
 	}
 	o.od = &objectData{
 		data:     data,
-		hash:     "",
+		size:     size,
+		hash:     hash,
 		modTime:  src.ModTime(ctx),
 		mimeType: fs.MimeType(ctx, src),
 	}
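The interesting part of the hunk above is that the discard path still produces a verifiable MD5 by hashing the stream as it is thrown away. A minimal standalone sketch of that pattern, written here in plain Go and independent of rclone's Fs/Object types, looks like this:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for the uploaded stream; in the backend this is the
	// io.Reader passed to Update.
	in := strings.NewReader("example payload")

	// Hash while discarding: io.Copy drives the reader straight into the
	// hash state, so nothing but the 16-byte digest survives the upload.
	h := md5.New()
	size, err := io.Copy(h, in)
	if err != nil {
		panic(err)
	}
	fmt.Printf("size=%d md5=%s\n", size, hex.EncodeToString(h.Sum(nil)))
}
```

Because io.Copy writes straight into the hash state, memory use stays constant no matter how large the uploaded object is, which is what makes the option useful for performance testing.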
@@ -222,3 +222,11 @@ type UserInfo struct {
 		} `json:"steps"`
 	} `json:"journey"`
 }
+
+// DiffResult is the response from /diff
+type DiffResult struct {
+	Result  int              `json:"result"`
+	DiffID  int64            `json:"diffid"`
+	Entries []map[string]any `json:"entries"`
+	Error   string           `json:"error"`
+}
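As a quick sanity check of the field tags above, here is a hedged sketch of decoding a /diff-style payload into this struct. The JSON body is made up for illustration and is not a captured pCloud response:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// DiffResult mirrors the struct added in the hunk above.
type DiffResult struct {
	Result  int              `json:"result"`
	DiffID  int64            `json:"diffid"`
	Entries []map[string]any `json:"entries"`
	Error   string           `json:"error"`
}

func main() {
	// Illustrative payload only; the field names follow the JSON tags above.
	payload := []byte(`{"result":0,"diffid":42,"entries":[{"event":"modifyfile","metadata":{"name":"a.txt","parentfolderid":0,"isfolder":false}}]}`)

	var res DiffResult
	if err := json.Unmarshal(payload, &res); err != nil {
		panic(err)
	}
	fmt.Println(res.DiffID, len(res.Entries), res.Error)
}
```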
@@ -171,6 +171,7 @@ type Fs struct {
 	dirCache     *dircache.DirCache // Map of directory path to directory id
 	pacer        *fs.Pacer          // pacer for API calls
 	tokenRenewer *oauthutil.Renew   // renew the token on expiry
+	lastDiffID   int64              // change tracking state for diff long-polling
 }

 // Object describes a pcloud object
@@ -1033,6 +1034,137 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 	return nil
 }

+// ChangeNotify implements fs.Features.ChangeNotify
+func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
+	// Start long-poll loop in background
+	go f.changeNotifyLoop(ctx, notify, ch)
+}
+
+// changeNotifyLoop contains the blocking long-poll logic.
+func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
+	// Standard polling interval
+	interval := 30 * time.Second
+
+	// Start with diffID = 0 to get the current state
+	var diffID int64
+
+	// Helper to process changes from the diff API
+	handleChanges := func(entries []map[string]any) {
+		notifiedPaths := make(map[string]bool)
+
+		for _, entry := range entries {
+			meta, ok := entry["metadata"].(map[string]any)
+			if !ok {
+				continue
+			}
+
+			// Robust extraction of ParentFolderID
+			var pid int64
+			if val, ok := meta["parentfolderid"]; ok {
+				switch v := val.(type) {
+				case float64:
+					pid = int64(v)
+				case int64:
+					pid = v
+				case int:
+					pid = int64(v)
+				}
+			}
+
+			// Resolve the path using dirCache.GetInv
+			// pCloud uses "d" prefix for directory IDs in cache, but API returns numbers
+			dirID := fmt.Sprintf("d%d", pid)
+			parentPath, ok := f.dirCache.GetInv(dirID)
+
+			if !ok {
+				// Parent not in cache, so we can ignore this change as it is outside
+				// of what the mount has seen or cares about.
+				continue
+			}
+
+			name, _ := meta["name"].(string)
+			fullPath := path.Join(parentPath, name)
+
+			// Determine EntryType (File or Directory)
+			entryType := fs.EntryObject
+			if isFolder, ok := meta["isfolder"].(bool); ok && isFolder {
+				entryType = fs.EntryDirectory
+			}
+
+			// Deduplicate notifications for this batch
+			if !notifiedPaths[fullPath] {
+				fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType)
+				notify(fullPath, entryType)
+				notifiedPaths[fullPath] = true
+			}
+		}
+	}
+
+	for {
+		// Check context and channel
+		select {
+		case <-ctx.Done():
+			return
+		case newInterval, ok := <-ch:
+			if !ok {
+				return
+			}
+			interval = newInterval
+		default:
+		}
+
+		// Setup /diff Request
+		opts := rest.Opts{
+			Method:     "GET",
+			Path:       "/diff",
+			Parameters: url.Values{},
+		}
+
+		if diffID != 0 {
+			opts.Parameters.Set("diffid", strconv.FormatInt(diffID, 10))
+			opts.Parameters.Set("block", "1")
+		} else {
+			opts.Parameters.Set("last", "0")
+		}
+
+		// Perform Long-Poll
+		// Timeout set to 90s (server usually blocks for 60s max)
+		reqCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
+		var result api.DiffResult
+
+		_, err := f.srv.CallJSON(reqCtx, &opts, nil, &result)
+		cancel()
+
+		if err != nil {
+			if errors.Is(err, context.Canceled) {
+				return
+			}
+			// Ignore timeout errors as they are normal for long-polling
+			if !errors.Is(err, context.DeadlineExceeded) {
+				fs.Infof(f, "ChangeNotify: polling error: %v. Waiting %v.", err, interval)
+				time.Sleep(interval)
+			}
+			continue
+		}
+
+		// If result is not 0, reset DiffID to resync
+		if result.Result != 0 {
+			diffID = 0
+			time.Sleep(2 * time.Second)
+			continue
+		}
+
+		if result.DiffID != 0 {
+			diffID = result.DiffID
+			f.lastDiffID = diffID
+		}
+
+		if len(result.Entries) > 0 {
+			handleChanges(result.Entries)
+		}
+	}
+}
+
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	// EU region supports SHA1 and SHA256 (but rclone doesn't
@@ -1401,6 +1533,7 @@ var (
 	_ fs.ListPer        = (*Fs)(nil)
 	_ fs.Abouter        = (*Fs)(nil)
 	_ fs.Shutdowner     = (*Fs)(nil)
+	_ fs.ChangeNotifier = (*Fs)(nil)
 	_ fs.Object         = (*Object)(nil)
 	_ fs.IDer           = (*Object)(nil)
 )
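The control flow in the ChangeNotify hunk follows a standard long-poll shape: give each request a deadline longer than the server's blocking window, treat a deadline expiry as "no news" rather than a failure, and only back off on real errors. A hedged, self-contained sketch of that shape, using a toy poll function instead of the pCloud API, is:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// poll stands in for one blocking /diff request; it is a stub for illustration.
func poll(ctx context.Context) (events int, err error) {
	select {
	case <-ctx.Done():
		return 0, ctx.Err()
	case <-time.After(50 * time.Millisecond):
		return 1, nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	backoff := 100 * time.Millisecond

	for {
		// Per-request deadline longer than the server's expected blocking window.
		reqCtx, cancelReq := context.WithTimeout(ctx, 200*time.Millisecond)
		n, err := poll(reqCtx)
		cancelReq()

		switch {
		case errors.Is(err, context.Canceled) || ctx.Err() != nil:
			return // shutting down
		case errors.Is(err, context.DeadlineExceeded):
			continue // normal for long-polling: just poll again
		case err != nil:
			time.Sleep(backoff) // real error: back off before retrying
			continue
		}
		fmt.Println("events:", n)
	}
}
```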
@@ -1,26 +1,26 @@
 name: Linode
 description: Linode Object Storage
 endpoint:
-  nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
-  us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
-  in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
-  us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
-  eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
-  id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
-  gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
-  us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
-  es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
-  au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
-  us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
-  it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
-  us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
-  jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
-  fr-par-1.linodeobjects.com: Paris (France), fr-par-1
-  br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
-  us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
-  ap-south-1.linodeobjects.com: Singapore, ap-south-1
-  sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
-  se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
-  us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1
+  nl-ams-1.linodeobjects.com: Amsterdam, NL (nl-ams-1)
+  us-southeast-1.linodeobjects.com: Atlanta, GA, US (us-southeast-1)
+  in-maa-1.linodeobjects.com: Chennai, IN (in-maa-1)
+  us-ord-1.linodeobjects.com: Chicago, IL, US (us-ord-1)
+  eu-central-1.linodeobjects.com: Frankfurt, DE (eu-central-1)
+  id-cgk-1.linodeobjects.com: Jakarta, ID (id-cgk-1)
+  gb-lon-1.linodeobjects.com: London 2, UK (gb-lon-1)
+  us-lax-1.linodeobjects.com: Los Angeles, CA, US (us-lax-1)
+  es-mad-1.linodeobjects.com: Madrid, ES (es-mad-1)
+  us-mia-1.linodeobjects.com: Miami, FL, US (us-mia-1)
+  it-mil-1.linodeobjects.com: Milan, IT (it-mil-1)
+  us-east-1.linodeobjects.com: Newark, NJ, US (us-east-1)
+  jp-osa-1.linodeobjects.com: Osaka, JP (jp-osa-1)
+  fr-par-1.linodeobjects.com: Paris, FR (fr-par-1)
+  br-gru-1.linodeobjects.com: Sao Paulo, BR (br-gru-1)
+  us-sea-1.linodeobjects.com: Seattle, WA, US (us-sea-1)
+  ap-south-1.linodeobjects.com: Singapore, SG (ap-south-1)
+  sg-sin-1.linodeobjects.com: Singapore 2, SG (sg-sin-1)
+  se-sto-1.linodeobjects.com: Stockholm, SE (se-sto-1)
+  jp-tyo-1.linodeobjects.com: Tokyo 3, JP (jp-tyo-1)
+  us-iad-10.linodeobjects.com: Washington, DC, US (us-iad-10)
 acl: {}
 bucket_acl: true
@@ -2,7 +2,17 @@ name: Selectel
 description: Selectel Object Storage
 region:
   ru-1: St. Petersburg
+  ru-3: St. Petersburg
+  ru-7: Moscow
+  gis-1: Moscow
+  kz-1: Kazakhstan
+  uz-2: Uzbekistan
 endpoint:
-  s3.ru-1.storage.selcloud.ru: Saint Petersburg
+  s3.ru-1.storage.selcloud.ru: St. Petersburg
+  s3.ru-3.storage.selcloud.ru: St. Petersburg
+  s3.ru-7.storage.selcloud.ru: Moscow
+  s3.gis-1.storage.selcloud.ru: Moscow
+  s3.kz-1.storage.selcloud.ru: Kazakhstan
+  s3.uz-2.storage.selcloud.ru: Uzbekistan
 quirks:
   list_url_encode: false
backend/shade/api/types.go (new file, 27 lines)
@@ -0,0 +1,27 @@
// Package api has type definitions for shade
package api

// ListDirResponse -------------------------------------------------
// Format from shade api
type ListDirResponse struct {
	Type  string `json:"type"`  // "file" or "tree"
	Path  string `json:"path"`  // Full path including root
	Ino   int    `json:"ino"`   // inode number
	Mtime int64  `json:"mtime"` // Modified time in milliseconds
	Ctime int64  `json:"ctime"` // Created time in milliseconds
	Size  int64  `json:"size"`  // Size in bytes
	Hash  string `json:"hash"`  // MD5 hash
	Draft bool   `json:"draft"` // Whether this is a draft file
}

// PartURL Type for multipart upload/download
type PartURL struct {
	URL     string            `json:"url"`
	Headers map[string]string `json:"headers,omitempty"`
}

// CompletedPart Type for completed parts when making a multipart upload.
type CompletedPart struct {
	ETag       string
	PartNumber int32
}
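Mtime and Ctime above are millisecond timestamps, so converting them needs time.UnixMilli rather than time.Unix. A small hedged sketch with a sample value only:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A sample millisecond timestamp as returned in ListDirResponse.Mtime.
	var mtimeMillis int64 = 1700000000123

	// time.UnixMilli interprets the value as milliseconds since the epoch;
	// time.Unix would be off by a factor of 1000 here.
	modTime := time.UnixMilli(mtimeMillis)
	fmt.Println(modTime.UTC().Format(time.RFC3339Nano))
}
```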
backend/shade/shade.go (new file, 1039 lines)
File diff suppressed because it is too large.
backend/shade/shade_test.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package shade_test

import (
	"testing"

	"github.com/rclone/rclone/backend/shade"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestShade"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*shade.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
backend/shade/upload.go (new file, 336 lines)
@@ -0,0 +1,336 @@
// multipart upload for shade

package shade

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"sort"
	"sync"

	"github.com/rclone/rclone/backend/shade/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunksize"
	"github.com/rclone/rclone/lib/multipart"
	"github.com/rclone/rclone/lib/rest"
)

var warnStreamUpload sync.Once

type shadeChunkWriter struct {
	initToken        string
	chunkSize        int64
	size             int64
	f                *Fs
	o                *Object
	completedParts   []api.CompletedPart
	completedPartsMu sync.Mutex
}

// uploadMultipart handles multipart upload for larger files
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
	chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
		Open:        o.fs,
		OpenOptions: options,
	})
	if err != nil {
		return err
	}

	var shadeWriter = chunkWriter.(*shadeChunkWriter)
	o.size = shadeWriter.size
	return nil
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}

	uploadParts := f.opt.MaxUploadParts
	if uploadParts < 1 {
		uploadParts = 1
	} else if uploadParts > maxUploadParts {
		uploadParts = maxUploadParts
	}
	size := src.Size()
	fs.FixRangeOption(options, size)

	// calculate size of parts
	chunkSize := f.opt.ChunkSize

	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
	// buffers here (default 64 MB). With a maximum number of parts (10,000) this will be a file of
	// 640 GB.
	if size == -1 {
		warnStreamUpload.Do(func() {
			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
				chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
		})
	} else {
		chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
	}

	token, err := o.fs.refreshJWTToken(ctx)
	if err != nil {
		return info, nil, fmt.Errorf("failed to get token: %w", err)
	}

	err = f.ensureParentDirectories(ctx, remote)
	if err != nil {
		return info, nil, fmt.Errorf("failed to ensure parent directories: %w", err)
	}

	fullPath := remote
	if f.root != "" {
		fullPath = path.Join(f.root, remote)
	}

	// Initiate multipart upload
	type initRequest struct {
		Path     string `json:"path"`
		PartSize int64  `json:"partSize"`
	}
	reqBody := initRequest{
		Path:     fullPath,
		PartSize: int64(chunkSize),
	}

	var initResp struct {
		Token string `json:"token"`
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart", o.fs.drive),
		RootURL: o.fs.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
		Options: options,
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		res, err := o.fs.srv.CallJSON(ctx, &opts, reqBody, &initResp)
		if err != nil {
			return res != nil && res.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err)
	}

	chunkWriter := &shadeChunkWriter{
		initToken: initResp.Token,
		chunkSize: int64(chunkSize),
		size:      size,
		f:         f,
		o:         o,
	}
	info = fs.ChunkWriterInfo{
		ChunkSize:         int64(chunkSize),
		Concurrency:       f.opt.Concurrency,
		LeavePartsOnError: false,
	}
	return info, chunkWriter, err
}

// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return 0, err
	}

	// Read chunk
	var chunk bytes.Buffer
	n, err := io.Copy(&chunk, reader)

	if n == 0 {
		return 0, nil
	}

	if err != nil {
		return 0, fmt.Errorf("failed to read chunk: %w", err)
	}
	// Get presigned URL for this part
	var partURL api.PartURL

	partOpts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart/part/%d?token=%s", s.f.drive, chunkNumber+1, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.CallJSON(ctx, &partOpts, nil, &partURL)
		if err != nil {
			return res != nil && res.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return 0, fmt.Errorf("failed to get part URL: %w", err)
	}
	opts := rest.Opts{
		Method:        "PUT",
		RootURL:       partURL.URL,
		Body:          &chunk,
		ContentType:   "",
		ContentLength: &n,
	}

	// Add headers
	var uploadRes *http.Response
	if len(partURL.Headers) > 0 {
		opts.ExtraHeaders = make(map[string]string)
		for k, v := range partURL.Headers {
			opts.ExtraHeaders[k] = v
		}
	}

	err = s.f.pacer.Call(func() (bool, error) {
		uploadRes, err = s.f.srv.Call(ctx, &opts)
		if err != nil {
			return uploadRes != nil && uploadRes.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return 0, fmt.Errorf("failed to upload part %d: %w", chunkNumber+1, err)
	}

	if uploadRes.StatusCode != http.StatusOK && uploadRes.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(uploadRes.Body)
		fs.CheckClose(uploadRes.Body, &err)
		return 0, fmt.Errorf("part upload failed with status %d: %s", uploadRes.StatusCode, string(body))
	}

	// Get ETag from response
	etag := uploadRes.Header.Get("ETag")
	fs.CheckClose(uploadRes.Body, &err)

	s.completedPartsMu.Lock()
	defer s.completedPartsMu.Unlock()
	s.completedParts = append(s.completedParts, api.CompletedPart{
		PartNumber: int32(chunkNumber + 1),
		ETag:       etag,
	})
	return n, nil
}

// Close completes the chunked writer, finalising the file.
func (s *shadeChunkWriter) Close(ctx context.Context) error {
	// Complete multipart upload
	sort.Slice(s.completedParts, func(i, j int) bool {
		return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber
	})

	type completeRequest struct {
		Parts []api.CompletedPart `json:"parts"`
	}
	var completeBody completeRequest

	if s.completedParts == nil {
		completeBody = completeRequest{Parts: []api.CompletedPart{}}
	} else {
		completeBody = completeRequest{Parts: s.completedParts}
	}

	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return err
	}

	completeOpts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart/complete?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	var response http.Response

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response)

		if err != nil && res == nil {
			return false, err
		}

		if res.StatusCode == http.StatusTooManyRequests {
			return true, err // Retry on 429
		}

		if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			body, _ := io.ReadAll(res.Body)
			return false, fmt.Errorf("complete multipart failed with status %d: %s", res.StatusCode, string(body))
		}

		return false, nil
	})

	if err != nil {
		return fmt.Errorf("failed to complete multipart upload: %w", err)
	}

	return nil
}

// Abort chunk write
//
// You can and should call Abort without calling Close.
func (s *shadeChunkWriter) Abort(ctx context.Context) error {
	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return err
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/abort/multipart?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.Call(ctx, &opts)
		if err != nil {
			fs.Debugf(s.f, "Failed to abort multipart upload: %v", err)
			return false, nil // Don't retry abort
		}
		if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			fs.Debugf(s.f, "Abort returned status %d", res.StatusCode)
		}
		return false, nil
	})
	if err != nil {
		return fmt.Errorf("failed to abort multipart upload: %w", err)
	}
	return nil
}
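The "640 GB" figure in OpenChunkWriter's comment is just chunk size times maximum part count. A quick hedged check of that arithmetic, using the values from the comment rather than rclone's actual option defaults:

```go
package main

import "fmt"

func main() {
	// Assumed values from the comment in OpenChunkWriter: a 64 MB chunk
	// size and a 10,000 part ceiling for multipart uploads.
	const chunkSize int64 = 64 * 1000 * 1000
	const maxParts int64 = 10000

	maxStream := chunkSize * maxParts
	fmt.Printf("max streamed upload ~ %d bytes (%d GB)\n", maxStream, maxStream/(1000*1000*1000))
}
```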
@@ -84,6 +84,7 @@ docs = [
     "protondrive.md",
     "seafile.md",
     "sftp.md",
+    "shade.md",
     "smb.md",
     "storj.md",
     "sugarsync.md",
@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)

 package cmount

@@ -6,7 +6,6 @@ import (
 	"io"
 	"os"
 	"path"
-	"runtime"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -211,12 +210,6 @@ func (fsys *FS) Readdir(dirPath string,
 	// We can't seek in directories and FUSE should know that so
 	// return an error if ofst is ever set.
 	if ofst > 0 {
-		// However openbsd doesn't seem to know this - perhaps a bug in its
-		// FUSE implementation or a bug in cgofuse?
-		// See: https://github.com/billziss-gh/cgofuse/issues/49
-		if runtime.GOOS == "openbsd" {
-			return 0
-		}
 		return -fuse.ESPIPE
 	}

@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)

 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
@@ -8,9 +8,9 @@ package cmount
 import (
 	"errors"
 	"fmt"
+	"strings"
 	"os"
 	"runtime"
-	"strings"
 	"time"

 	"github.com/rclone/rclone/cmd/mountlib"
@@ -59,14 +59,12 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
 	} else {
 		options = append(options, "-o", "fsname="+device)
 		options = append(options, "-o", "subtype=rclone")
-		if runtime.GOOS != "openbsd" {
 		options = append(options, "-o", fmt.Sprintf("max_readahead=%d", opt.MaxReadAhead))
 		// This causes FUSE to supply O_TRUNC with the Open
 		// call which is more efficient for cmount. However
 		// it does not work with cgofuse on Windows with
 		// WinFSP so cmount must work with or without it.
 		options = append(options, "-o", "atomic_o_trunc")
-		}
 		if opt.DaemonTimeout != 0 {
 			options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(time.Duration(opt.DaemonTimeout).Seconds())))
 		}
@@ -1,4 +1,4 @@
-//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || (openbsd && cgo) || windows) && (!race || !windows)
+//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows) && (!race || !windows)

 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
@@ -1,4 +1,4 @@
-//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (openbsd && cgo && cmount) || (windows && cmount))
+//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))

 // Package cmount implements a FUSE mounting system for rclone remotes.
 //
@@ -26,6 +26,10 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re
 The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
 use |-R| to make them recurse.

+List commands prefer a recursive method that uses more memory but fewer
+transactions by default. Use |--disable ListR| to suppress the behavior.
+See [|--fast-list|](/docs/#fast-list) for more details.
+
 Listing a nonexistent directory will produce an error except for
 remotes which can't have empty directories (e.g. s3, swift, or gcs -
 the bucket-based remotes).`, "|", "`")
@@ -13,6 +13,26 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
 `--auth-key` is not provided then `serve s3` will allow anonymous
 access.

+Like all rclone flags `--auth-key` can be set via environment
+variables, in this case `RCLONE_AUTH_KEY`. Since this flag can be
+repeated, the input to `RCLONE_AUTH_KEY` is CSV encoded. Because the
+`accessKey,secretKey` pair has a comma in it, it needs to be in
+quotes.
+
+```console
+export RCLONE_AUTH_KEY='"user,pass"'
+rclone serve s3 ...
+```
+
+Or to supply multiple identities:
+
+```console
+export RCLONE_AUTH_KEY='"user1,pass1","user2,pass2"'
+rclone serve s3 ...
+```
+
+Setting this variable without quotes will produce an error.
+
 Please note that some clients may require HTTPS endpoints. See [the
 SSL docs](#tls-ssl) for more information.

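The quoting requirement in the documentation above comes straight from CSV rules: a field containing a comma must be quoted or it splits into two fields. A small hedged sketch of that behaviour using Go's encoding/csv, standing in for however rclone actually parses the variable:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// parse reads one CSV record, as an env var like RCLONE_AUTH_KEY would supply it.
func parse(value string) ([]string, error) {
	return csv.NewReader(strings.NewReader(value)).Read()
}

func main() {
	quoted, _ := parse(`"user,pass"`)
	fmt.Println(quoted) // [user,pass]  -> one accessKey,secretKey pair

	unquoted, _ := parse(`user,pass`)
	fmt.Println(unquoted) // [user pass] -> two fields, not a valid pair
}
```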
@@ -70,6 +70,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
 		w.s3Secret = getAuthSecret(opt.AuthKey)
 	}

+	authList, err := authlistResolver(opt.AuthKey)
+	if err != nil {
+		return nil, fmt.Errorf("parsing auth list failed: %q", err)
+	}
+
 	var newLogger logger
 	w.faker = gofakes3.New(
 		newBackend(w),
@@ -77,7 +82,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
 		gofakes3.WithLogger(newLogger),
 		gofakes3.WithRequestID(rand.Uint64()),
 		gofakes3.WithoutVersioning(),
-		gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
+		gofakes3.WithV4Auth(authList),
 		gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
 	)

@@ -92,7 +97,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
 		w._vfs = vfs.New(f, vfsOpt)

 		if len(opt.AuthKey) > 0 {
-			w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
+			w.faker.AddAuthKeys(authList)
 		}
 	}

@@ -3,6 +3,7 @@ package s3
 import (
 	"context"
 	"encoding/hex"
+	"errors"
 	"io"
 	"os"
 	"path"
@@ -125,15 +126,14 @@ func rmdirRecursive(p string, VFS *vfs.VFS) {
 	}
 }

-func authlistResolver(list []string) map[string]string {
+func authlistResolver(list []string) (map[string]string, error) {
 	authList := make(map[string]string)
 	for _, v := range list {
 		parts := strings.Split(v, ",")
 		if len(parts) != 2 {
-			fs.Infof(nil, "Ignored: invalid auth pair %s", v)
-			continue
+			return nil, errors.New("invalid auth pair: expecting a single comma")
 		}
 		authList[parts[0]] = parts[1]
 	}
-	return authList
+	return authList, nil
 }
@@ -202,6 +202,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
 {{< provider name="Servercore Object Storage" home="https://servercore.com/services/object-storage/" config="/s3/#servercore" >}}
 {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
+{{< provider name="Shade" home="https://shade.inc" config="/shade/" >}}
 {{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
 {{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
 {{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}}
@@ -1055,3 +1055,8 @@ put them back in again. -->
 - Vladislav Tropnikov <vtr.name@gmail.com>
 - Leo <i@hardrain980.com>
 - Johannes Rothe <mail@johannes-rothe.de>
+- Tingsong Xu <tingsong.xu@rightcapital.com>
+- Jonas Tingeborn <134889+jojje@users.noreply.github.com>
+- jhasse-shade <jacob@shade.inc>
+- vyv03354 <VYV03354@nifty.ne.jp>
+- masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>
@@ -6,6 +6,22 @@ description: "Rclone Changelog"

 # Changelog

+## v1.72.1 - 2025-12-10
+
+[See commits](https://github.com/rclone/rclone/compare/v1.72.0...v1.72.1)
+
+- Bug Fixes
+  - build: update to go1.25.5 to fix [CVE-2025-61729](https://pkg.go.dev/vuln/GO-2025-4155)
+  - doc fixes (Duncan Smart, Nick Craig-Wood)
+  - configfile: Fix piped config support (Jonas Tingeborn)
+  - log
+    - Fix PID not included in JSON log output (Tingsong Xu)
+    - Fix backtrace not going to the --log-file (Nick Craig-Wood)
+- Google Cloud Storage
+  - Improve endpoint parameter docs (Johannes Rothe)
+- S3
+  - Add missing regions for Selectel provider (Nick Craig-Wood)
+
 ## v1.72.0 - 2025-11-21

 [See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.72.0)
@@ -82,6 +82,7 @@ See the following for detailed instructions for
 - [rsync.net](/sftp/#rsync-net)
 - [Seafile](/seafile/)
 - [SFTP](/sftp/)
+- [Shade](/shade/)
 - [Sia](/sia/)
 - [SMB](/smb/)
 - [Storj](/storj/)
@@ -3277,6 +3278,10 @@ The available flags are:
 - `mapper` dumps the JSON blobs being sent to the program supplied with
   `--metadata-mapper` and received from it. It can be useful for debugging
   the metadata mapper interface.
+- `curl` dumps the HTTP request as a `curl` command. It can be combined with
+  the other HTTP debugging flags (e.g. `requests`, `bodies`). By default the
+  auth will be masked - combine it with `auth` to get the curl commands with
+  authentication included too.

 ## Filtering

@@ -59,6 +59,7 @@ Here is an overview of the major features of each cloud storage system.
 | Quatrix by Maytech | - | R/W | No | No | - | - |
 | Seafile | - | - | No | No | - | - |
 | SFTP | MD5, SHA1 ² | DR/W | Depends | No | - | - |
+| Shade | - | - | Yes | No | - | - |
 | Sia | - | - | No | No | - | - |
 | SMB | - | R/W | Yes | No | - | - |
 | SugarSync | - | - | No | No | - | - |
@@ -540,7 +541,7 @@ upon backend-specific capabilities.
 | OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
 | OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
 | Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
-| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
+| pCloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
 | PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
 | Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
 | premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes |
@@ -173,6 +173,31 @@ So if the folder you want rclone to use your is "My Music/", then use the return
 id from ```rclone lsf``` command (ex. `dxxxxxxxx2`) as the `root_folder_id` variable
 value in the config file.

+### Change notifications and mounts
+
+The pCloud backend supports real-time updates for rclone mounts via change
+notifications. rclone uses pCloud's diff long-polling API to detect changes and
+will automatically refresh directory listings in the mounted filesystem when
+changes occur.
+
+Notes and behavior:
+
+- Works automatically when using `rclone mount` and requires no additional
+  configuration.
+- Notifications are directory-scoped: when rclone detects a change, it refreshes
+  the affected directory so new/removed/renamed files become visible promptly.
+- Updates are near real-time. The backend uses a long-poll with short fallback
+  polling intervals, so you should see changes appear quickly without manual
+  refreshes.
+
+If you want to debug or verify notifications, you can use the helper command:
+
+```bash
+rclone test changenotify remote:
+```
+
+This will log incoming change notifications for the given remote.
+
 <!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/pcloud/pcloud.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
 ### Standard options

docs/content/shade.md (new file, 218 lines)
@@ -0,0 +1,218 @@
|
|||||||
|
# {{< icon "fa fa-moon" >}} Shade
|
||||||
|
|
||||||
|
This is a backend for the [Shade](https://shade.inc/) platform
|
||||||
|
|
||||||
|
## About Shade
|
||||||
|
|
||||||
|
[Shade](https://shade.inc/) is an AI-powered cloud NAS that makes your cloud files behave like a local drive, optimized for media and creative workflows. It provides fast, secure access with natural-language search, easy sharing, and scalable cloud storage.
|
||||||
|
|
||||||
|
|
||||||
|
## Accounts & Pricing
|
||||||
|
|
||||||
|
To use this backend, you need to [create a free account](https://app.shade.inc/) on Shade. You can start with a free account and get 20GB of storage for free.
|
||||||
|
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Paths are specified as `remote:path`
|
||||||
|
|
||||||
|
Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
|
||||||
|
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Here is an example of making a Shade configuration.
|
||||||
|
|
||||||
|
First, create a [create a free account](https://app.shade.inc/) account and choose a plan.
|
||||||
|
|
||||||
|
You will need to log in and get the `API Key` and `Drive ID` for your account from the settings section of your account and created drive respectively.
|
||||||
|
|
||||||
|
Now run
|
||||||
|
|
||||||
|
`rclone config`
|
||||||
|
|
||||||
|
Follow this interactive process:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
$ rclone config
|
||||||
|
e) Edit existing remote
|
||||||
|
n) New remote
|
||||||
|
d) Delete remote
|
||||||
|
r) Rename remote
|
||||||
|
c) Copy remote
|
||||||
|
s) Set configuration password
|
||||||
|
q) Quit config
|
||||||
|
e/n/d/r/c/s/q> n
|
||||||
|
|
||||||
|
Enter name for new remote.
|
||||||
|
name> Shade
|
||||||
|
|
||||||
|
Option Storage.
|
||||||
|
Type of storage to configure.
|
||||||
|
Choose a number from below, or type in your own value.
|
||||||
|
[OTHER OPTIONS]
|
||||||
|
xx / Shade FS
|
||||||
|
\ (shade)
|
||||||
|
[OTHER OPTIONS]
|
||||||
|
Storage> xx
|
||||||
|
|
||||||
|
Option drive_id.
|
||||||
|
The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.
|
||||||
|
Enter a value.
|
||||||
|
drive_id> [YOUR_ID]
|
||||||
|
|
||||||
|
Option api_key.
|
||||||
|
An API key for your account.
|
||||||
|
Enter a value.
|
||||||
|
api_key> [YOUR_API_KEY]
|
||||||
|
|
||||||
|
Edit advanced config?
|
||||||
|
y) Yes
|
||||||
|
n) No (default)
|
||||||
|
y/n> n
|
||||||
|
|
||||||
|
Configuration complete.
|
||||||
|
Options:
|
||||||
|
- type: shade
|
||||||
|
- drive_id: [YOUR_ID]
|
||||||
|
- api_key: [YOUR_API_KEY]
|
||||||
|
Keep this "Shade" remote?
|
||||||
|
y) Yes this is OK (default)
|
||||||
|
e) Edit this remote
|
||||||
|
d) Delete this remote
|
||||||
|
y/e/d> y
|
||||||
|
```
|
||||||
|
|
||||||
|
### Modification times and hashes
|
||||||
|
|
||||||
|
Shade does not support hashes and writing mod times.
|
||||||
|
|
||||||
|
|
||||||
|
### Transfers
|
||||||
|
|
||||||
|
Shade uses multipart uploads by default. This means that files will be chunked and sent up to Shade concurrently. In order to configure how many simultaneous uploads you want to use, upload the 'concurrency' option in the advanced config section. Note that this uses more memory and initiates more http requests.
|
||||||
|
|
||||||
|
### Deleting files
|
||||||
|
|
||||||
|
Please note that when deleting files in Shade via rclone it will delete the file instantly, instead of sending it to the trash. This means that it will not be recoverable.
|
||||||
|
|
||||||
|
|
||||||
|
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/box/box.go then run make backenddocs" >}}
|
||||||
|
### Standard options
|
||||||
|
|
||||||
|
Here are the Standard options specific to shade (Shade FS).
|
||||||
|
|
||||||
|
#### --shade-drive-id
|
||||||
|
|
||||||
|
The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.
|
||||||
|
|
||||||
|
Properties:
|
||||||
|
|
||||||
|
- Config: drive_id
|
||||||
|
- Env Var: RCLONE_SHADE_DRIVE_ID
|
||||||
|
- Type: string
|
||||||
|
- Required: true
|
||||||
|
|
||||||
|
#### --shade-api-key
|
||||||
|
|
||||||
|
An API key for your account. You can find this under Settings > API Keys
|
||||||
|
|
||||||
|
Properties:
|
||||||
|
|
||||||
|
- Config: api_key
|
||||||
|
- Env Var: RCLONE_SHADE_API_KEY
|
||||||
|
- Type: string
|
||||||
|
- Required: true
|
||||||
|
|
||||||
|
### Advanced options

Here are the Advanced options specific to shade (Shade FS).

#### --shade-endpoint

Endpoint for the service.

Leave blank normally.

Properties:

- Config: endpoint
- Env Var: RCLONE_SHADE_ENDPOINT
- Type: string
- Required: false

#### --shade-chunk-size

Chunk size to use for uploading.

Any files larger than this will be uploaded in chunks of this size.

Note that this is stored in memory per transfer, so increasing it will
increase memory usage.

Minimum is 5MB, maximum is 5GB.

Properties:

- Config: chunk_size
- Env Var: RCLONE_SHADE_CHUNK_SIZE
- Type: SizeSuffix
- Default: 64Mi

#### --shade-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_SHADE_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot

#### --shade-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_SHADE_DESCRIPTION
- Type: string
- Required: false

{{< rem autogenerated options stop >}}

## Limitations

Note that Shade is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

Shade only supports filenames up to 255 characters in length.

`rclone about` is not supported by the Shade backend. Backends without
this capability cannot determine free space for an rclone mount or
use policy `mfs` (most free space) as a member of an rclone union
remote.

See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)

## Backend commands

Here are the commands specific to the shade backend.

Run them with

    rclone backend COMMAND remote:

The help below will explain what arguments each command takes.

See the [backend](/commands/rclone_backend/) command for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).

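As a generic sketch (not specific to any particular Shade command), a backend command can be invoked over the remote control API like this; `COMMAND` is a placeholder for an actual command name:

```
rclone rc backend/command command=COMMAND fs=Shade: -o option=value
```
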
@@ -107,6 +107,7 @@
 <a class="dropdown-item" href="/seafile/"><i class="fa fa-server fa-fw"></i> Seafile</a>
 <a class="dropdown-item" href="/sftp/"><i class="fa fa-server fa-fw"></i> SFTP</a>
 <a class="dropdown-item" href="/sia/"><i class="fa fa-globe fa-fw"></i> Sia</a>
+<a class="dropdown-item" href="/shade/"><i class="fa fa-moon fa-fw"></i> Shade</a>
 <a class="dropdown-item" href="/smb/"><i class="fa fa-server fa-fw"></i> SMB / CIFS</a>
 <a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
 <a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>

@@ -372,7 +372,7 @@ func (p *pipedInput) Read(b []byte) (int, error) {
 	return p.Reader.Read(b)
 }
 
-func (_ *pipedInput) Seek(_ int64, _ int) (int64, error) {
+func (*pipedInput) Seek(int64, int) (int64, error) {
 	return 0, fmt.Errorf("Seek not supported")
 }
 

@@ -14,6 +14,7 @@ const (
 	DumpGoRoutines
 	DumpOpenFiles
 	DumpMapper
+	DumpCurl
 )
 
 type dumpChoices struct{}
@@ -29,6 +30,7 @@ func (dumpChoices) Choices() []BitsChoicesInfo {
 		{uint64(DumpGoRoutines), "goroutines"},
 		{uint64(DumpOpenFiles), "openfiles"},
 		{uint64(DumpMapper), "mapper"},
+		{uint64(DumpCurl), "curl"},
 	}
 }
 

@@ -15,6 +15,8 @@ import (
 	"net/http/httputil"
 	"net/url"
 	"os"
+	"slices"
+	"strings"
 	"sync"
 	"time"
 
@@ -24,6 +26,7 @@ import (
 	"github.com/rclone/rclone/lib/structs"
 	"github.com/youmark/pkcs8"
 	"golang.org/x/net/publicsuffix"
+	"moul.io/http2curl/v2"
 )
 
 const (

@@ -439,6 +442,18 @@ func cleanAuths(buf []byte) []byte {
 	return buf
 }
 
+// cleanCurl gets rid of Auth headers in a curl command
+func cleanCurl(cmd *http2curl.CurlCommand) {
+	for _, authBuf := range authBufs {
+		auth := "'" + string(authBuf)
+		for i, arg := range *cmd {
+			if strings.HasPrefix(arg, auth) {
+				(*cmd)[i] = auth + "XXXX'"
+			}
+		}
+	}
+}
+
 var expireWindow = 30 * time.Second
 
 func isCertificateExpired(cc *tls.Config) bool {

@@ -492,6 +507,26 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
 		fs.Debugf(nil, "%s", separatorReq)
 		logMutex.Unlock()
 	}
+	// Dump curl request
+	if t.dump&(fs.DumpCurl) != 0 {
+		cmd, err := http2curl.GetCurlCommand(req)
+		if err != nil {
+			fs.Debugf(nil, "Failed to create curl command: %v", err)
+		} else {
+			// Patch -X HEAD into --head
+			for i := range len(*cmd) - 1 {
+				if (*cmd)[i] == "-X" && (*cmd)[i+1] == "'HEAD'" {
+					(*cmd)[i] = "--head"
+					*cmd = slices.Delete(*cmd, i+1, i+2)
+					break
+				}
+			}
+			if t.dump&fs.DumpAuth == 0 {
+				cleanCurl(cmd)
+			}
+			fs.Debugf(nil, "HTTP REQUEST: %v", cmd)
+		}
+	}
 	// Do round trip
 	resp, err = t.Transport.RoundTrip(req)
 	// Logf response

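With this change, `curl` becomes an accepted value for the existing `--dump` flag (see the `dumpChoices` addition above). A sketch of how it might be exercised once built:

```
# Log each HTTP request as an equivalent curl command; Authorization and
# X-Auth-Token values are redacted unless --dump auth is also given
rclone lsd remote: -vv --dump curl
```
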
@@ -19,6 +19,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"moul.io/http2curl/v2"
 )
 
 func TestCleanAuth(t *testing.T) {
@@ -61,6 +62,32 @@ func TestCleanAuths(t *testing.T) {
 	}
 }
 
+func TestCleanCurl(t *testing.T) {
+	for _, test := range []struct {
+		in   []string
+		want []string
+	}{{
+		[]string{""},
+		[]string{""},
+	}, {
+		[]string{"floo"},
+		[]string{"floo"},
+	}, {
+		[]string{"'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
+		[]string{"'Authorization: XXXX'", "'Potato: Help'", ""},
+	}, {
+		[]string{"'X-Auth-Token: AAAAAAAAA'", "'Potato: Help'", ""},
+		[]string{"'X-Auth-Token: XXXX'", "'Potato: Help'", ""},
+	}, {
+		[]string{"'X-Auth-Token: AAAAAAAAA'", "'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
+		[]string{"'X-Auth-Token: XXXX'", "'Authorization: XXXX'", "'Potato: Help'", ""},
+	}} {
+		in := http2curl.CurlCommand(test.in)
+		cleanCurl(&in)
+		assert.Equal(t, test.want, test.in, test.in)
+	}
+}
+
 var certSerial = int64(0)
 
 // Create a test certificate and key pair that is valid for a specific

@@ -209,7 +209,7 @@ func InitLogging() {
 	// Log file output
 	if Opt.File != "" {
 		var w io.Writer
-		if Opt.MaxSize == 0 {
+		if Opt.MaxSize < 0 {
 			// No log rotation - just open the file as normal
 			// We'll capture tracebacks like this too.
 			f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)

@@ -1301,6 +1301,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
 	err := Sync(ctx, r.Fremote, r.Flocal, false)
 	assert.Equal(t, fs.ErrorNotDeleting, err)
 	testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
+	accounting.GlobalStats().ResetCounters()
 
 	r.CheckLocalListing(
 		t,

@@ -13,6 +13,7 @@ import (
 
 	_ "github.com/rclone/rclone/backend/all"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
@@ -507,6 +508,7 @@ func TestError(t *testing.T) {
 	err = Sync(ctx, r.Fremote, r.Flocal, true)
 	// testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
 	assert.Error(t, err)
+	accounting.GlobalStats().ResetCounters()
 
 	r.CheckLocalListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
 	r.CheckRemoteListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})

@@ -662,6 +662,10 @@ backends:
     ignoretests:
       - cmd/bisync
       - cmd/gitannex
+  - backend: "shade"
+    remote: "TestShade:"
+    fastlist: false
+
   - backend: "archive"
     remote: "TestArchive:"
     fastlist: false

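This registers a `TestShade:` remote with the integration test runner. A sketch of running the backend tests against it locally, assuming a `TestShade:` remote is configured and following rclone's usual integration-test pattern:

```
cd backend/shade
go test -v -remote TestShade:
```
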
@@ -5,7 +5,6 @@ import (
 	"runtime"
 	"testing"
 
-	"github.com/rclone/rclone/vfs"
 	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -111,9 +110,6 @@ func TestWriteFileDup(t *testing.T) {
 
 	var dupFd uintptr
 	dupFd, err = writeTestDup(fh.Fd())
-	if err == vfs.ENOSYS {
-		t.Skip("dup not supported on this platform")
-	}
 	require.NoError(t, err)
 
 	dupFile := os.NewFile(dupFd, fh.Name())

@@ -1,4 +1,4 @@
-//go:build !linux && !darwin && !freebsd && !openbsd && !windows
+//go:build !linux && !darwin && !freebsd && !windows
 
 package vfstest
 

@@ -1,4 +1,4 @@
-//go:build linux || darwin || freebsd || openbsd
+//go:build linux || darwin || freebsd
 
 package vfstest
 