mirror of https://github.com/rclone/rclone.git
synced 2025-12-30 23:23:30 +00:00

Compare commits: a99d155fd4 ... fix-9031-b (28 commits)
| Author | SHA1 | Date |
|---|---|---|
| | b5bf9b629f | |
| | 6f81885ebf | |
| | 976aa6b416 | |
| | b3a0383ca3 | |
| | c13f129339 | |
| | 748d8c8957 | |
| | 4d379efcbb | |
| | e5e6a4b5ae | |
| | df18e8c55b | |
| | f4e17d8b0b | |
| | e5c69511bc | |
| | 175d4bc553 | |
| | 4851f1796c | |
| | 4ff8899b2c | |
| | 8f29a0b0a1 | |
| | 8b0e76e53b | |
| | 233fef5c4d | |
| | b9586c3e03 | |
| | 0dc0ab1330 | |
| | a6bbdb35a0 | |
| | b33cb77b6c | |
| | d51322bb5f | |
| | e718ab6091 | |
| | 0a9e6e130f | |
| | 3358b9049c | |
| | 847734d421 | |
| | f7b255d4ec | |
| | 24c752ed9e | |
.github/workflows/build.yml (vendored, 2 changed lines)

```diff
@@ -229,7 +229,7 @@ jobs:
           cache: false

       - name: Cache
-        uses: actions/cache@v4
+        uses: actions/cache@v5
         with:
           path: |
             ~/go/pkg/mod
```
```diff
@@ -129,7 +129,7 @@ jobs:

       - name: Load Go Build Cache for Docker
         id: go-cache
-        uses: actions/cache@v4
+        uses: actions/cache@v5
         with:
           key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
           restore-keys: |
@@ -183,7 +183,7 @@ jobs:
          touch "/tmp/digests/${digest#sha256:}"

       - name: Upload Image Digest
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v6
         with:
           name: digests-${{ env.PLATFORM }}
           path: /tmp/digests/*
@@ -198,7 +198,7 @@ jobs:

     steps:
       - name: Download Image Digests
-        uses: actions/download-artifact@v6
+        uses: actions/download-artifact@v7
        with:
          path: /tmp/digests
          pattern: digests-*
```
```diff
@@ -17,6 +17,14 @@ linters:
     #- prealloc # TODO
     - revive
     - unconvert
+  exclusions:
+    rules:
+      - linters:
+          - revive
+        text: 'var-naming: avoid meaningless package names'
+      - linters:
+          - revive
+        text: 'var-naming: avoid package names that conflict with Go standard library package names'
   # Configure checks. Mostly using defaults but with some commented exceptions.
   settings:
     govet:
@@ -136,6 +144,7 @@ linters:
       - name: var-naming
         disabled: false


 formatters:
   enable:
     - goimports
```
```diff
@@ -109,6 +109,7 @@ directories to and from different cloud storage providers.
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
 - Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
+- Shade [:page_facing_up:](https://rclone.org/shade/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 - Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
 - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
```
```diff
@@ -55,6 +55,7 @@ import (
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
+	_ "github.com/rclone/rclone/backend/shade"
 	_ "github.com/rclone/rclone/backend/sharefile"
 	_ "github.com/rclone/rclone/backend/sia"
 	_ "github.com/rclone/rclone/backend/smb"
```
```diff
@@ -133,32 +133,23 @@ type File struct {
 	Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
 }

-// StorageAPI is as returned from the b2_authorize_account call
-type StorageAPI struct {
+// AuthorizeAccountResponse is as returned from the b2_authorize_account call
+type AuthorizeAccountResponse struct {
 	AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
 	AccountID               string `json:"accountId"`               // The identifier for the account.
 	Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
-		Buckets []struct { // When present, access is restricted to one or more buckets.
-			ID   string `json:"id"`   // ID of bucket
-			Name string `json:"name"` // When present, name of bucket - may be empty
-		} `json:"buckets"`
-		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
+		BucketID     string   `json:"bucketId"`     // When present, access is restricted to one bucket.
+		BucketName   string   `json:"bucketName"`   // When present, name of bucket - may be empty
+		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
 		NamePrefix   any      `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
 	} `json:"allowed"`
 	APIURL              string `json:"apiUrl"`              // The base URL to use for all API calls except for uploading and downloading files.
 	AuthorizationToken  string `json:"authorizationToken"`  // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
 	DownloadURL         string `json:"downloadUrl"`         // The base URL to use for downloading files.
 	MinimumPartSize     int    `json:"minimumPartSize"`     // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
 	RecommendedPartSize int    `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
 }

-// AuthorizeAccountResponse is as returned from the b2_authorize_account call
-type AuthorizeAccountResponse struct {
-	AccountID          string `json:"accountId"`          // The identifier for the account.
-	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
-	APIs               struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
-		Storage StorageAPI `json:"storageApi"`
-	} `json:"apiInfo"`
-}
-
 // ListBucketsRequest is parameters for b2_list_buckets call
 type ListBucketsRequest struct {
 	AccountID string `json:"accountId"` // The identifier for the account.
```
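Since the whole v1 payload now lands in one struct again, decoding and capability checks become direct field accesses. A minimal, self-contained sketch (the struct is a trimmed copy of the one above; the sample JSON values are invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"slices"
)

// Trimmed copy of AuthorizeAccountResponse above, with just the fields
// this sketch needs.
type authorizeAccountResponse struct {
	AccountID string `json:"accountId"`
	Allowed   struct {
		BucketID     string   `json:"bucketId"`
		BucketName   string   `json:"bucketName"`
		Capabilities []string `json:"capabilities"`
	} `json:"allowed"`
	APIURL             string `json:"apiUrl"`
	AuthorizationToken string `json:"authorizationToken"`
	DownloadURL        string `json:"downloadUrl"`
}

func main() {
	// Invented sample payload using the JSON field names from the struct tags.
	const sample = `{
		"accountId": "123456789abc",
		"allowed": {"bucketId": "b1", "bucketName": "my-bucket", "capabilities": ["listBuckets", "readFiles"]},
		"apiUrl": "https://api001.backblazeb2.com",
		"authorizationToken": "token",
		"downloadUrl": "https://f001.backblazeb2.com"
	}`
	var info authorizeAccountResponse
	if err := json.Unmarshal([]byte(sample), &info); err != nil {
		panic(err)
	}
	// The same check hasPermission performs in b2.go below.
	fmt.Println("can list buckets:", slices.Contains(info.Allowed.Capabilities, "listBuckets"))
}
```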
backend/b2/b2.go (120 changed lines)
```diff
@@ -607,29 +607,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err != nil {
 		return nil, fmt.Errorf("failed to authorize account: %w", err)
 	}
-	// If this is a key limited to one or more buckets, one of them must exist
-	// and be ours.
-	if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 {
-		buckets := f.info.APIs.Storage.Allowed.Buckets
-		var rootFound = false
-		var rootID string
-		for _, b := range buckets {
-			allowedBucket := f.opt.Enc.ToStandardName(b.Name)
-			if allowedBucket == "" {
-				fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
-				continue
-			}
-
-			if allowedBucket == f.rootBucket {
-				rootFound = true
-				rootID = b.ID
-			}
+	// If this is a key limited to a single bucket, it must exist already
+	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
+		allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
+		if allowedBucket == "" {
+			return nil, errors.New("bucket that application key is restricted to no longer exists")
 		}
-		if !rootFound {
-			return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets)
+		if allowedBucket != f.rootBucket {
+			return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
 		}
 		f.cache.MarkOK(f.rootBucket)
-		f.setBucketID(f.rootBucket, rootID)
+		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
 	}
 	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the (bucket,directory) is actually an existing file
```
```diff
@@ -655,7 +643,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 	defer f.authMu.Unlock()
 	opts := rest.Opts{
 		Method:   "GET",
-		Path:     "/b2api/v4/b2_authorize_account",
+		Path:     "/b2api/v1/b2_authorize_account",
 		RootURL:  f.opt.Endpoint,
 		UserName: f.opt.Account,
 		Password: f.opt.Key,
```
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to authenticate: %w", err)
|
||||
}
|
||||
f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
|
||||
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasPermission returns if the current AuthorizationToken has the selected permission
|
||||
func (f *Fs) hasPermission(permission string) bool {
|
||||
return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
|
||||
return slices.Contains(f.info.Allowed.Capabilities, permission)
|
||||
}
|
||||
|
||||
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
|
||||
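For context, the authorization call above amounts to a plain GET with HTTP basic auth against the v1 endpoint, after which every request carries the returned token. A standalone sketch under those assumptions (the account ID and application key are placeholders, and only a few response fields are decoded):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholders - substitute a real B2 account ID and application key.
	const account, key = "YOUR_ACCOUNT_ID", "YOUR_APPLICATION_KEY"

	req, err := http.NewRequest("GET", "https://api.backblazeb2.com/b2api/v1/b2_authorize_account", nil)
	if err != nil {
		panic(err)
	}
	// Same shape as the rest.Opts above: UserName/Password become basic auth.
	req.SetBasicAuth(account, key)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var info struct {
		APIURL             string `json:"apiUrl"`
		AuthorizationToken string `json:"authorizationToken"`
		DownloadURL        string `json:"downloadUrl"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		panic(err)
	}
	// Later calls go to apiUrl + "/b2api/v1" with the token, mirroring
	// f.srv.SetRoot(...).SetHeader("Authorization", ...) above.
	fmt.Println("API root:", info.APIURL+"/b2api/v1")
}
```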
```diff
@@ -1079,68 +1067,44 @@ type listBucketFn func(*api.Bucket) error

 // listBucketsToFn lists the buckets to the function supplied
 func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
-	responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]
-
-	for i := range f.info.APIs.Storage.Allowed.Buckets {
-		b := &f.info.APIs.Storage.Allowed.Buckets[i]
-		// Empty names indicate a bucket that no longer exists, this is non-fatal
-		// for multi-bucket API keys.
-		if b.Name == "" {
-			continue
-		}
-		// When requesting a specific bucket skip over non-matching names
-		if bucketName != "" && b.Name != bucketName {
-			continue
-		}
-
-		var account = api.ListBucketsRequest{
-			AccountID: f.info.AccountID,
-			BucketID:  b.ID,
-		}
-		if bucketName != "" && account.BucketID == "" {
-			account.BucketName = f.opt.Enc.FromStandardName(bucketName)
-		}
-
-		var response api.ListBucketsResponse
-		opts := rest.Opts{
-			Method: "POST",
-			Path:   "/b2_list_buckets",
-		}
-		err := f.pacer.Call(func() (bool, error) {
-			resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
-			return f.shouldRetry(ctx, resp, err)
-		})
-		if err != nil {
-			return err
-		}
-		responses = append(responses, response)
-	}
+	var account = api.ListBucketsRequest{
+		AccountID: f.info.AccountID,
+		BucketID:  f.info.Allowed.BucketID,
+	}
+	if bucketName != "" && account.BucketID == "" {
+		account.BucketName = f.opt.Enc.FromStandardName(bucketName)
+	}
+
+	var response api.ListBucketsResponse
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/b2_list_buckets",
+	}
+	err := f.pacer.Call(func() (bool, error) {
+		resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
+		return f.shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return err
+	}
 	f.bucketIDMutex.Lock()
 	f.bucketTypeMutex.Lock()
 	f._bucketID = make(map[string]string, 1)
 	f._bucketType = make(map[string]string, 1)
-	for ri := range responses {
-		response := &responses[ri]
-		for i := range response.Buckets {
-			bucket := &response.Buckets[i]
-			bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
-			f.cache.MarkOK(bucket.Name)
-			f._bucketID[bucket.Name] = bucket.ID
-			f._bucketType[bucket.Name] = bucket.Type
-		}
+	for i := range response.Buckets {
+		bucket := &response.Buckets[i]
+		bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
+		f.cache.MarkOK(bucket.Name)
+		f._bucketID[bucket.Name] = bucket.ID
+		f._bucketType[bucket.Name] = bucket.Type
 	}
 	f.bucketTypeMutex.Unlock()
 	f.bucketIDMutex.Unlock()
-	for ri := range responses {
-		response := &responses[ri]
-		for i := range response.Buckets {
-			bucket := &response.Buckets[i]
-			err := fn(bucket)
-			if err != nil {
-				return err
-			}
-		}
+	for i := range response.Buckets {
+		bucket := &response.Buckets[i]
+		err = fn(bucket)
+		if err != nil {
+			return err
+		}
 	}
 	return nil
```
```diff
@@ -1642,7 +1606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	bucket, bucketPath := f.split(remote)
 	var RootURL string
 	if f.opt.DownloadURL == "" {
-		RootURL = f.info.APIs.Storage.DownloadURL
+		RootURL = f.info.DownloadURL
 	} else {
 		RootURL = f.opt.DownloadURL
 	}
```
```diff
@@ -1993,7 +1957,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 	// Use downloadUrl from backblaze if downloadUrl is not set
 	// otherwise use the custom downloadUrl
 	if o.fs.opt.DownloadURL == "" {
-		opts.RootURL = o.fs.info.APIs.Storage.DownloadURL
+		opts.RootURL = o.fs.info.DownloadURL
 	} else {
 		opts.RootURL = o.fs.opt.DownloadURL
 	}
```
```diff
@@ -346,9 +346,26 @@ can't check the size and hash but the file contents will be decompressed.
 		Advanced: true,
 		Default:  false,
 	}, {
-		Name: "endpoint",
-		Help: "Endpoint for the service.\n\nLeave blank normally.",
+		Name: "endpoint",
+		Help: `Custom endpoint for the storage API. Leave blank to use the provider default.
+
+When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint),
+the subpath will be ignored during upload operations due to a limitation in the
+underlying Google API Go client library.
+Download and listing operations will work correctly with the full endpoint path.
+If you require subpath support for uploads, avoid using subpaths in your custom
+endpoint configuration.`,
 		Advanced: true,
+		Examples: []fs.OptionExample{{
+			Value: "storage.example.org",
+			Help:  "Specify a custom endpoint",
+		}, {
+			Value: "storage.example.org:4443",
+			Help:  "Specifying a custom endpoint with port",
+		}, {
+			Value: "storage.example.org:4443/gcs/api",
+			Help:  "Specifying a subpath, see the note, uploads won't use the custom path!",
+		}},
 	}, {
 		Name: config.ConfigEncoding,
 		Help: config.ConfigEncodingHelp,
```
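The limitation in the new help text comes down to which part of the endpoint URL each code path uses: listing and downloads keep the full URL, while the upload path in the Google API Go client is derived from the host alone. A small illustrative sketch (the endpoint value is the hypothetical one from the examples above):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical custom endpoint including a subpath, as in the option examples.
	u, err := url.Parse("https://storage.example.org:4443/gcs/api")
	if err != nil {
		panic(err)
	}
	// Download and listing requests can use the full URL, subpath included.
	fmt.Println("full endpoint:", u.String())
	// Uploads are built from the host only, so the subpath is effectively dropped.
	fmt.Println("host used for uploads:", u.Host)
	fmt.Println("subpath ignored on upload:", u.Path)
}
```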
```diff
@@ -1,26 +1,26 @@
 name: Linode
 description: Linode Object Storage
 endpoint:
-  nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
-  us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
-  in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
-  us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
-  eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
-  id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
-  gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
-  us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
-  es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
-  au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
-  us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
-  it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
-  us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
-  jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
-  fr-par-1.linodeobjects.com: Paris (France), fr-par-1
-  br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
-  us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
-  ap-south-1.linodeobjects.com: Singapore, ap-south-1
-  sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
-  se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
-  us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1
+  nl-ams-1.linodeobjects.com: Amsterdam, NL (nl-ams-1)
+  us-southeast-1.linodeobjects.com: Atlanta, GA, US (us-southeast-1)
+  in-maa-1.linodeobjects.com: Chennai, IN (in-maa-1)
+  us-ord-1.linodeobjects.com: Chicago, IL, US (us-ord-1)
+  eu-central-1.linodeobjects.com: Frankfurt, DE (eu-central-1)
+  id-cgk-1.linodeobjects.com: Jakarta, ID (id-cgk-1)
+  gb-lon-1.linodeobjects.com: London 2, UK (gb-lon-1)
+  us-lax-1.linodeobjects.com: Los Angeles, CA, US (us-lax-1)
+  es-mad-1.linodeobjects.com: Madrid, ES (es-mad-1)
+  us-mia-1.linodeobjects.com: Miami, FL, US (us-mia-1)
+  it-mil-1.linodeobjects.com: Milan, IT (it-mil-1)
+  us-east-1.linodeobjects.com: Newark, NJ, US (us-east-1)
+  jp-osa-1.linodeobjects.com: Osaka, JP (jp-osa-1)
+  fr-par-1.linodeobjects.com: Paris, FR (fr-par-1)
+  br-gru-1.linodeobjects.com: Sao Paulo, BR (br-gru-1)
+  us-sea-1.linodeobjects.com: Seattle, WA, US (us-sea-1)
+  ap-south-1.linodeobjects.com: Singapore, SG (ap-south-1)
+  sg-sin-1.linodeobjects.com: Singapore 2, SG (sg-sin-1)
+  se-sto-1.linodeobjects.com: Stockholm, SE (se-sto-1)
+  jp-tyo-1.linodeobjects.com: Tokyo 3, JP (jp-tyo-1)
+  us-iad-10.linodeobjects.com: Washington, DC, US (us-iad-10)
 acl: {}
 bucket_acl: true
```
```diff
@@ -2,7 +2,17 @@ name: Selectel
 description: Selectel Object Storage
+region:
+  ru-1: St. Petersburg
+  ru-3: St. Petersburg
+  ru-7: Moscow
+  gis-1: Moscow
+  kz-1: Kazakhstan
+  uz-2: Uzbekistan
 endpoint:
-  s3.ru-1.storage.selcloud.ru: Saint Petersburg
+  s3.ru-1.storage.selcloud.ru: St. Petersburg
+  s3.ru-3.storage.selcloud.ru: St. Petersburg
+  s3.ru-7.storage.selcloud.ru: Moscow
+  s3.gis-1.storage.selcloud.ru: Moscow
+  s3.kz-1.storage.selcloud.ru: Kazakhstan
+  s3.uz-2.storage.selcloud.ru: Uzbekistan
 quirks:
   list_url_encode: false
```
backend/shade/api/types.go (new file, 27 lines)

```go
// Package api has type definitions for shade
package api

// ListDirResponse -------------------------------------------------
// Format from shade api
type ListDirResponse struct {
	Type  string `json:"type"`  // "file" or "tree"
	Path  string `json:"path"`  // Full path including root
	Ino   int    `json:"ino"`   // inode number
	Mtime int64  `json:"mtime"` // Modified time in milliseconds
	Ctime int64  `json:"ctime"` // Created time in milliseconds
	Size  int64  `json:"size"`  // Size in bytes
	Hash  string `json:"hash"`  // MD5 hash
	Draft bool   `json:"draft"` // Whether this is a draft file
}

// PartURL Type for multipart upload/download
type PartURL struct {
	URL     string            `json:"url"`
	Headers map[string]string `json:"headers,omitempty"`
}

// CompletedPart Type for completed parts when making a multipart upload.
type CompletedPart struct {
	ETag       string
	PartNumber int32
}
```
backend/shade/shade.go (new file, 1017 lines): diff suppressed because it is too large.
backend/shade/shade_test.go (new file, 21 lines)

```go
package shade_test

import (
	"testing"

	"github.com/rclone/rclone/backend/shade"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestShade"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*shade.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
```
backend/shade/upload.go (new file, 336 lines)

```go
// multipart upload for shade

package shade

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"sort"
	"sync"

	"github.com/rclone/rclone/backend/shade/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunksize"
	"github.com/rclone/rclone/lib/multipart"
	"github.com/rclone/rclone/lib/rest"
)

var warnStreamUpload sync.Once

type shadeChunkWriter struct {
	initToken        string
	chunkSize        int64
	size             int64
	f                *Fs
	o                *Object
	completedParts   []api.CompletedPart
	completedPartsMu sync.Mutex
}

// uploadMultipart handles multipart upload for larger files
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
	chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
		Open:        o.fs,
		OpenOptions: options,
	})
	if err != nil {
		return err
	}

	var shadeWriter = chunkWriter.(*shadeChunkWriter)
	o.size = shadeWriter.size
	return nil
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}

	uploadParts := f.opt.MaxUploadParts
	if uploadParts < 1 {
		uploadParts = 1
	} else if uploadParts > maxUploadParts {
		uploadParts = maxUploadParts
	}
	size := src.Size()
	fs.FixRangeOption(options, size)

	// calculate size of parts
	chunkSize := f.opt.ChunkSize

	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
	// buffers here (default 64 MB). With a maximum number of parts (10,000) this will be a file of
	// 640 GB.
	if size == -1 {
		warnStreamUpload.Do(func() {
			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
				chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
		})
	} else {
		chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
	}

	token, err := o.fs.refreshJWTToken(ctx)
	if err != nil {
		return info, nil, fmt.Errorf("failed to get token: %w", err)
	}

	err = f.ensureParentDirectories(ctx, remote)
	if err != nil {
		return info, nil, fmt.Errorf("failed to ensure parent directories: %w", err)
	}

	fullPath := remote
	if f.root != "" {
		fullPath = path.Join(f.root, remote)
	}

	// Initiate multipart upload
	type initRequest struct {
		Path     string `json:"path"`
		PartSize int64  `json:"partSize"`
	}
	reqBody := initRequest{
		Path:     fullPath,
		PartSize: int64(chunkSize),
	}

	var initResp struct {
		Token string `json:"token"`
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart", o.fs.drive),
		RootURL: o.fs.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
		Options: options,
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		res, err := o.fs.srv.CallJSON(ctx, &opts, reqBody, &initResp)
		if err != nil {
			return res != nil && res.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})
	if err != nil {
		return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err)
	}

	chunkWriter := &shadeChunkWriter{
		initToken: initResp.Token,
		chunkSize: int64(chunkSize),
		size:      size,
		f:         f,
		o:         o,
	}
	info = fs.ChunkWriterInfo{
		ChunkSize:         int64(chunkSize),
		Concurrency:       f.opt.Concurrency,
		LeavePartsOnError: false,
	}
	return info, chunkWriter, err
}

// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return 0, err
	}

	// Read chunk
	var chunk bytes.Buffer
	n, err := io.Copy(&chunk, reader)
	if n == 0 {
		return 0, nil
	}
	if err != nil {
		return 0, fmt.Errorf("failed to read chunk: %w", err)
	}

	// Get presigned URL for this part
	var partURL api.PartURL
	partOpts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart/part/%d?token=%s", s.f.drive, chunkNumber+1, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.CallJSON(ctx, &partOpts, nil, &partURL)
		if err != nil {
			return res != nil && res.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})
	if err != nil {
		return 0, fmt.Errorf("failed to get part URL: %w", err)
	}

	opts := rest.Opts{
		Method:        "PUT",
		RootURL:       partURL.URL,
		Body:          &chunk,
		ContentType:   "",
		ContentLength: &n,
	}

	// Add headers
	var uploadRes *http.Response
	if len(partURL.Headers) > 0 {
		opts.ExtraHeaders = make(map[string]string)
		for k, v := range partURL.Headers {
			opts.ExtraHeaders[k] = v
		}
	}

	err = s.f.pacer.Call(func() (bool, error) {
		uploadRes, err = s.f.srv.Call(ctx, &opts)
		if err != nil {
			return uploadRes != nil && uploadRes.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})
	if err != nil {
		return 0, fmt.Errorf("failed to upload part %d: %w", chunkNumber+1, err)
	}

	if uploadRes.StatusCode != http.StatusOK && uploadRes.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(uploadRes.Body)
		fs.CheckClose(uploadRes.Body, &err)
		return 0, fmt.Errorf("part upload failed with status %d: %s", uploadRes.StatusCode, string(body))
	}

	// Get ETag from response
	etag := uploadRes.Header.Get("ETag")
	fs.CheckClose(uploadRes.Body, &err)

	s.completedPartsMu.Lock()
	defer s.completedPartsMu.Unlock()
	s.completedParts = append(s.completedParts, api.CompletedPart{
		PartNumber: int32(chunkNumber + 1),
		ETag:       etag,
	})
	return n, nil
}

// Close complete chunked writer finalising the file.
func (s *shadeChunkWriter) Close(ctx context.Context) error {
	// Complete multipart upload
	sort.Slice(s.completedParts, func(i, j int) bool {
		return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber
	})

	type completeRequest struct {
		Parts []api.CompletedPart `json:"parts"`
	}
	var completeBody completeRequest
	if s.completedParts == nil {
		completeBody = completeRequest{Parts: []api.CompletedPart{}}
	} else {
		completeBody = completeRequest{Parts: s.completedParts}
	}

	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return err
	}

	completeOpts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart/complete?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	var response http.Response

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response)
		if err != nil && res == nil {
			return false, err
		}
		if res.StatusCode == http.StatusTooManyRequests {
			return true, err // Retry on 429
		}
		if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			body, _ := io.ReadAll(res.Body)
			return false, fmt.Errorf("complete multipart failed with status %d: %s", res.StatusCode, string(body))
		}
		return false, nil
	})
	if err != nil {
		return fmt.Errorf("failed to complete multipart upload: %w", err)
	}

	return nil
}

// Abort chunk write
//
// You can and should call Abort without calling Close.
func (s *shadeChunkWriter) Abort(ctx context.Context) error {
	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return err
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/abort/multipart?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.Call(ctx, &opts)
		if err != nil {
			fs.Debugf(s.f, "Failed to abort multipart upload: %v", err)
			return false, nil // Don't retry abort
		}
		if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			fs.Debugf(s.f, "Abort returned status %d", res.StatusCode)
		}
		return false, nil
	})
	if err != nil {
		return fmt.Errorf("failed to abort multipart upload: %w", err)
	}
	return nil
}
```
```diff
@@ -84,6 +84,7 @@ docs = [
     "protondrive.md",
     "seafile.md",
     "sftp.md",
+    "shade.md",
     "smb.md",
     "storj.md",
     "sugarsync.md",
```
```diff
@@ -153,7 +153,7 @@ func TestRun(t *testing.T) {
 		fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error())
 	}
 	publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey)
-	if privateKeyErr != nil {
+	if publicKeyError != nil {
 		fs.Fatal(nil, "error generating test public key "+publicKeyError.Error())
 	}
```
```diff
@@ -45,6 +45,10 @@ var OptionsInfo = fs.Options{{
 	Name:    "disable_dir_list",
 	Default: false,
 	Help:    "Disable HTML directory list on GET request for a directory",
+}, {
+	Name:    "disable_zip",
+	Default: false,
+	Help:    "Disable zip download of directories",
 }}.
 	Add(libhttp.ConfigInfo).
 	Add(libhttp.AuthConfigInfo).
@@ -57,6 +61,7 @@ type Options struct {
 	Template       libhttp.TemplateConfig
 	EtagHash       string `config:"etag_hash"`
 	DisableDirList bool   `config:"disable_dir_list"`
+	DisableZip     bool   `config:"disable_zip"`
 }

 // Opt is options set by command line flags
```
```diff
@@ -408,6 +413,24 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
 		return
 	}
 	dir := node.(*vfs.Dir)
+
+	if r.URL.Query().Get("download") == "zip" && !w.opt.DisableZip {
+		fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr)
+		zipName := path.Base(dirRemote)
+		if dirRemote == "" {
+			zipName = "root"
+		}
+		rw.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"")
+		rw.Header().Set("Content-Type", "application/zip")
+		rw.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
+		err := vfs.CreateZip(ctx, dir, rw)
+		if err != nil {
+			serve.Error(ctx, dirRemote, rw, "Failed to create zip", err)
+			return
+		}
+		return
+	}
+
 	dirEntries, err := dir.ReadDirAll()

 	if err != nil {
@@ -417,6 +440,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str

 	// Make the entries for display
 	directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate())
+	directory.DisableZip = w.opt.DisableZip
 	for _, node := range dirEntries {
 		if vfscommon.Opt.NoModTime {
 			directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
```
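The new zip download path streams the archive directly into the HTTP response instead of buffering it. A minimal standalone sketch of the same pattern using only the standard library (the handler and archive contents are invented for illustration; rclone's actual work happens in vfs.CreateZip):

```go
package main

import (
	"archive/zip"
	"log"
	"net/http"
)

// zipHandler streams a small zip archive to the client, setting the same
// headers the WebDAV change sets before writing any archive bytes.
func zipHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Disposition", `attachment; filename="root.zip"`)
	w.Header().Set("Content-Type", "application/zip")

	zw := zip.NewWriter(w) // compressed data goes straight to the response
	defer zw.Close()

	f, err := zw.Create("hello.txt")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if _, err := f.Write([]byte("hello from the zip\n")); err != nil {
		log.Println("write failed:", err)
	}
}

func main() {
	http.HandleFunc("/", zipHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```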
```diff
@@ -202,6 +202,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
 {{< provider name="Servercore Object Storage" home="https://servercore.com/services/object-storage/" config="/s3/#servercore" >}}
 {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
+{{< provider name="Shade" home="https://shade.inc" config="/shade/" >}}
 {{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
 {{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
 {{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}}
```
```diff
@@ -1050,3 +1050,11 @@ put them back in again. -->
 - Nikolay Kiryanov <nikolay@kiryanov.ru>
 - Diana <5275194+DianaNites@users.noreply.github.com>
 - Duncan Smart <duncan.smart@gmail.com>
+- vicerace <vicerace@sohu.com>
+- Cliff Frey <cliff@openai.com>
+- Vladislav Tropnikov <vtr.name@gmail.com>
+- Leo <i@hardrain980.com>
+- Johannes Rothe <mail@johannes-rothe.de>
+- Tingsong Xu <tingsong.xu@rightcapital.com>
+- Jonas Tingeborn <134889+jojje@users.noreply.github.com>
+- jhasse-shade <jacob@shade.inc>
```
````diff
@@ -283,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scena
 All copy commands send the following 4 requests:

 ```text
-/b2api/v4/b2_authorize_account
+/b2api/v1/b2_authorize_account
 /b2api/v1/b2_create_bucket
 /b2api/v1/b2_list_buckets
 /b2api/v1/b2_list_file_names
````
```diff
@@ -6,6 +6,22 @@ description: "Rclone Changelog"

 # Changelog

+## v1.72.1 - 2025-12-10
+
+[See commits](https://github.com/rclone/rclone/compare/v1.72.0...v1.72.1)
+
+- Bug Fixes
+  - build: update to go1.25.5 to fix [CVE-2025-61729](https://pkg.go.dev/vuln/GO-2025-4155)
+  - doc fixes (Duncan Smart, Nick Craig-Wood)
+  - configfile: Fix piped config support (Jonas Tingeborn)
+  - log
+    - Fix PID not included in JSON log output (Tingsong Xu)
+    - Fix backtrace not going to the --log-file (Nick Craig-Wood)
+- Google Cloud Storage
+  - Improve endpoint parameter docs (Johannes Rothe)
+- S3
+  - Add missing regions for Selectel provider (Nick Craig-Wood)
+
 ## v1.72.0 - 2025-11-21

 [See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.72.0)
```
```diff
@@ -82,6 +82,7 @@ See the following for detailed instructions for
 - [rsync.net](/sftp/#rsync-net)
 - [Seafile](/seafile/)
 - [SFTP](/sftp/)
+- [Shade](/shade/)
 - [Sia](/sia/)
 - [SMB](/smb/)
 - [Storj](/storj/)
```
```diff
@@ -59,6 +59,7 @@ Here is an overview of the major features of each cloud storage system.
 | Quatrix by Maytech | - | R/W | No | No | - | - |
 | Seafile | - | - | No | No | - | - |
 | SFTP | MD5, SHA1 ² | DR/W | Depends | No | - | - |
+| Shade | - | - | Yes | No | - | - |
 | Sia | - | - | No | No | - | - |
 | SMB | - | R/W | Yes | No | - | - |
 | SugarSync | - | - | No | No | - | - |
@@ -540,7 +541,7 @@ upon backend-specific capabilities.
 | OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
 | OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
 | Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
-| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
+| pCloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
 | PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
 | Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
 | premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes |
```
docs/content/shade.md (new file, 218 lines)

# {{< icon "fa fa-moon" >}} Shade

This is a backend for the [Shade](https://shade.inc/) platform.

## About Shade

[Shade](https://shade.inc/) is an AI-powered cloud NAS that makes your cloud files behave like a local drive, optimized for media and creative workflows. It provides fast, secure access with natural-language search, easy sharing, and scalable cloud storage.

## Accounts & Pricing

To use this backend, you need to [create an account](https://app.shade.inc/) on Shade. You can start with a free account, which includes 20GB of storage.

## Usage

Paths are specified as `remote:path`.

Paths may be as deep as required, e.g. `remote:directory/subdirectory`.

## Configuration

Here is an example of making a Shade configuration.

First, [create a free account](https://app.shade.inc/) and choose a plan.

You will need to log in and get the `API Key` and `Drive ID` for your account, from the settings section of your account and your created drive respectively.

Now run

`rclone config`

Follow this interactive process:

```sh
$ rclone config
e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> n

Enter name for new remote.
name> Shade

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[OTHER OPTIONS]
xx / Shade FS
   \ (shade)
[OTHER OPTIONS]
Storage> xx

Option drive_id.
The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.
Enter a value.
drive_id> [YOUR_ID]

Option api_key.
An API key for your account.
Enter a value.
api_key> [YOUR_API_KEY]

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: shade
- drive_id: [YOUR_ID]
- api_key: [YOUR_API_KEY]
Keep this "Shade" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

### Modification times and hashes

Shade does not support hashes or setting modification times.

### Transfers

Shade uses multipart uploads by default, so files are chunked and sent to Shade concurrently. To configure how many simultaneous uploads to use, update the 'concurrency' option in the advanced config section. Note that higher concurrency uses more memory and initiates more HTTP requests.

### Deleting files

Please note that when deleting files in Shade via rclone, the file is deleted instantly instead of being sent to the trash. This means that it will not be recoverable.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/shade/shade.go then run make backenddocs" >}}
### Standard options

Here are the Standard options specific to shade (Shade FS).

#### --shade-drive-id

The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.

Properties:

- Config: drive_id
- Env Var: RCLONE_SHADE_DRIVE_ID
- Type: string
- Required: true

#### --shade-api-key

An API key for your account. You can find this under Settings > API Keys.

Properties:

- Config: api_key
- Env Var: RCLONE_SHADE_API_KEY
- Type: string
- Required: true

### Advanced options

Here are the Advanced options specific to shade (Shade FS).

#### --shade-endpoint

Endpoint for the service.

Leave blank normally.

Properties:

- Config: endpoint
- Env Var: RCLONE_SHADE_ENDPOINT
- Type: string
- Required: false

#### --shade-chunk-size

Chunk size to use for uploading.

Any files larger than this will be uploaded in chunks of this size.

Note that this is stored in memory per transfer, so increasing it will
increase memory usage.

Minimum is 5MB, maximum is 5GB.

Properties:

- Config: chunk_size
- Env Var: RCLONE_SHADE_CHUNK_SIZE
- Type: SizeSuffix
- Default: 64Mi

#### --shade-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_SHADE_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot

#### --shade-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_SHADE_DESCRIPTION
- Type: string
- Required: false

{{< rem autogenerated options stop >}}

## Limitations

Note that Shade is case insensitive, so you can't have a file called
"Hello.doc" and one called "hello.doc".

Shade only supports filenames up to 255 characters in length.

`rclone about` is not supported by the Shade backend. Backends without
this capability cannot determine free space for an rclone mount or
use policy `mfs` (most free space) as a member of an rclone union
remote.

See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/).

## Backend commands

Here are the commands specific to the shade backend.

Run them with

    rclone backend COMMAND remote:

The help below will explain what arguments each command takes.

See the [backend](/commands/rclone_backend/) command for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).
```diff
@@ -107,6 +107,7 @@
         <a class="dropdown-item" href="/seafile/"><i class="fa fa-server fa-fw"></i> Seafile</a>
         <a class="dropdown-item" href="/sftp/"><i class="fa fa-server fa-fw"></i> SFTP</a>
         <a class="dropdown-item" href="/sia/"><i class="fa fa-globe fa-fw"></i> Sia</a>
+        <a class="dropdown-item" href="/shade/"><i class="fa fa-moon fa-fw"></i> Shade</a>
         <a class="dropdown-item" href="/smb/"><i class="fa fa-server fa-fw"></i> SMB / CIFS</a>
         <a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
         <a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>
```
```diff
@@ -2,6 +2,7 @@ package configfile

 import (
 	"fmt"
+	"io"
 	"os"
 	"path/filepath"
 	"runtime"
```
```diff
@@ -362,3 +363,39 @@ func TestConfigFileSaveSymlinkAbsolute(t *testing.T) {
 		testSymlink(t, link, target, resolvedTarget)
 	})
 }
+
+type pipedInput struct {
+	io.Reader
+}
+
+func (p *pipedInput) Read(b []byte) (int, error) {
+	return p.Reader.Read(b)
+}
+
+func (*pipedInput) Seek(int64, int) (int64, error) {
+	return 0, fmt.Errorf("Seek not supported")
+}
+
+func TestPipedConfig(t *testing.T) {
+	t.Run("DoesNotSupportSeeking", func(t *testing.T) {
+		r := &pipedInput{strings.NewReader("")}
+		_, err := r.Seek(0, io.SeekStart)
+		require.Error(t, err)
+	})
+
+	t.Run("IsSupported", func(t *testing.T) {
+		r := &pipedInput{strings.NewReader(configData)}
+		_, err := config.Decrypt(r)
+		require.NoError(t, err)
+	})
+
+	t.Run("PlainTextConfigIsNotConsumedByCryptCheck", func(t *testing.T) {
+		in := &pipedInput{strings.NewReader(configData)}
+
+		r, _ := config.Decrypt(in)
+		got, err := io.ReadAll(r)
+		require.NoError(t, err)
+
+		assert.Equal(t, configData, string(got))
+	})
+}
```
```diff
@@ -77,8 +77,9 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
 		if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
 			return nil, errors.New("unsupported configuration encryption - update rclone for support")
 		}
-		if _, err := b.Seek(0, io.SeekStart); err != nil {
-			return nil, err
-		}
+		// Restore non-seekable plain-text stream to its original state
+		return io.MultiReader(strings.NewReader(l+"\n"), r), nil
 	}
 	return b, nil
 }
```
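The fix replaces the Seek, which fails on pipes, with io.MultiReader: the first line that was consumed while sniffing for the encryption header is stitched back in front of the remaining stream. A minimal sketch of the same trick with invented helper names:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
)

// peekFirstLine inspects the first line of a possibly non-seekable reader,
// then returns a reader that behaves as if nothing had been consumed.
func peekFirstLine(in io.Reader) (string, io.Reader, error) {
	br := bufio.NewReader(in)
	line, err := br.ReadString('\n')
	if err != nil && err != io.EOF {
		return "", nil, err
	}
	// Stitch the consumed bytes back in front of the rest of the stream.
	restored := io.MultiReader(strings.NewReader(line), br)
	return strings.TrimRight(line, "\n"), restored, nil
}

func main() {
	in := strings.NewReader("[remote]\ntype = local\n") // stands in for piped stdin
	first, restored, err := peekFirstLine(in)
	if err != nil {
		panic(err)
	}
	fmt.Printf("peeked: %q\n", first)
	if _, err := io.Copy(os.Stdout, restored); err != nil { // prints the full original content
		panic(err)
	}
}
```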
```diff
@@ -209,7 +209,7 @@ func InitLogging() {
 	// Log file output
 	if Opt.File != "" {
 		var w io.Writer
-		if Opt.MaxSize == 0 {
+		if Opt.MaxSize < 0 {
 			// No log rotation - just open the file as normal
 			// We'll capture tracebacks like this too.
 			f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
```
```diff
@@ -310,6 +310,10 @@ func (h *OutputHandler) jsonLog(ctx context.Context, buf *bytes.Buffer, r slog.R
 	r.AddAttrs(
 		slog.String("source", getCaller(2)),
 	)
+	// Add PID if requested
+	if h.format&logFormatPid != 0 {
+		r.AddAttrs(slog.Int("pid", os.Getpid()))
+	}
 	h.mu.Lock()
 	err = h.jsonHandler.Handle(ctx, r)
 	if err == nil {
```
```diff
@@ -198,6 +198,17 @@ func TestAddOutputUseJSONLog(t *testing.T) {
 	assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText)
 }

+// Test JSON log includes PID when logFormatPid is set.
+func TestJSONLogWithPid(t *testing.T) {
+	buf := &bytes.Buffer{}
+	h := NewOutputHandler(buf, nil, logFormatJSON|logFormatPid)
+
+	r := slog.NewRecord(t0, slog.LevelInfo, "hello", 0)
+	require.NoError(t, h.Handle(context.Background(), r))
+	output := buf.String()
+	assert.Contains(t, output, fmt.Sprintf(`"pid":%d`, os.Getpid()))
+}
+
 // Test WithAttrs and WithGroup return new handlers with same settings.
 func TestWithAttrsAndGroup(t *testing.T) {
 	buf := &bytes.Buffer{}
```
```diff
@@ -1301,6 +1301,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
 	err := Sync(ctx, r.Fremote, r.Flocal, false)
 	assert.Equal(t, fs.ErrorNotDeleting, err)
 	testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
+	accounting.GlobalStats().ResetCounters()

 	r.CheckLocalListing(
 		t,
```
```diff
@@ -13,6 +13,7 @@ import (

 	_ "github.com/rclone/rclone/backend/all"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
@@ -507,6 +508,7 @@ func TestError(t *testing.T) {
 	err = Sync(ctx, r.Fremote, r.Flocal, true)
 	// testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
 	assert.Error(t, err)
+	accounting.GlobalStats().ResetCounters()

 	r.CheckLocalListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
 	r.CheckRemoteListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
```
```diff
@@ -662,6 +662,10 @@ backends:
     ignoretests:
       - cmd/bisync
       - cmd/gitannex
+  - backend: "shade"
+    remote: "TestShade:"
+    fastlist: false
+
   - backend: "archive"
     remote: "TestArchive:"
     fastlist: false
```