Mirror of https://github.com/rclone/rclone.git (synced 2025-12-30 23:23:30 +00:00)

Compare commits: a99d155fd4...drime (29 commits)
| SHA1 |
|---|
| 31976d0bfa |
| c919fad933 |
| f7f32d1a7c |
| 672d9469eb |
| 8e94a154ed |
| 4d858210b3 |
| aa6973b89e |
| 42373c1cff |
| e5e6a4b5ae |
| df18e8c55b |
| f4e17d8b0b |
| e5c69511bc |
| 175d4bc553 |
| 4851f1796c |
| 4ff8899b2c |
| 8f29a0b0a1 |
| 8b0e76e53b |
| 233fef5c4d |
| b9586c3e03 |
| 0dc0ab1330 |
| a6bbdb35a0 |
| b33cb77b6c |
| d51322bb5f |
| e718ab6091 |
| 0a9e6e130f |
| 3358b9049c |
| 847734d421 |
| f7b255d4ec |
| 24c752ed9e |
@@ -17,6 +17,14 @@ linters:
     #- prealloc # TODO
     - revive
     - unconvert
+  exclusions:
+    rules:
+      - linters:
+          - revive
+        text: 'var-naming: avoid meaningless package names'
+      - linters:
+          - revive
+        text: 'var-naming: avoid package names that conflict with Go standard library package names'
   # Configure checks. Mostly using defaults but with some commented exceptions.
   settings:
     govet:
@@ -136,6 +144,7 @@ linters:
       - name: var-naming
+        disabled: false

 formatters:
   enable:
     - goimports
@@ -38,6 +38,7 @@ directories to and from different cloud storage providers.
 - DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 - Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
 - Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
+- Drime [:page_facing_up:](https://rclone.org/s3/#drime)
 - Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 - Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
 - Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
@@ -109,6 +110,7 @@ directories to and from different cloud storage providers.
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
 - Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
+- Shade [:page_facing_up:](https://rclone.org/shade/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 - Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
 - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
@@ -16,6 +16,7 @@ import (
     _ "github.com/rclone/rclone/backend/compress"
     _ "github.com/rclone/rclone/backend/crypt"
     _ "github.com/rclone/rclone/backend/doi"
+    _ "github.com/rclone/rclone/backend/drime"
     _ "github.com/rclone/rclone/backend/drive"
     _ "github.com/rclone/rclone/backend/dropbox"
     _ "github.com/rclone/rclone/backend/fichier"
@@ -55,6 +56,7 @@ import (
     _ "github.com/rclone/rclone/backend/s3"
     _ "github.com/rclone/rclone/backend/seafile"
     _ "github.com/rclone/rclone/backend/sftp"
+    _ "github.com/rclone/rclone/backend/shade"
     _ "github.com/rclone/rclone/backend/sharefile"
     _ "github.com/rclone/rclone/backend/sia"
     _ "github.com/rclone/rclone/backend/smb"
@@ -1081,21 +1081,10 @@ type listBucketFn func(*api.Bucket) error
 func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
     responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]

-    for i := range f.info.APIs.Storage.Allowed.Buckets {
-        b := &f.info.APIs.Storage.Allowed.Buckets[i]
-        // Empty names indicate a bucket that no longer exists, this is non-fatal
-        // for multi-bucket API keys.
-        if b.Name == "" {
-            continue
-        }
-        // When requesting a specific bucket skip over non-matching names
-        if bucketName != "" && b.Name != bucketName {
-            continue
-        }
-
+    call := func(id string) error {
         var account = api.ListBucketsRequest{
             AccountID: f.info.AccountID,
-            BucketID:  b.ID,
+            BucketID:  id,
         }
         if bucketName != "" && account.BucketID == "" {
             account.BucketName = f.opt.Enc.FromStandardName(bucketName)
@@ -1114,6 +1103,32 @@ func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBuck
             return err
         }
         responses = append(responses, response)
         return nil
     }

+    for i := range f.info.APIs.Storage.Allowed.Buckets {
+        b := &f.info.APIs.Storage.Allowed.Buckets[i]
+        // Empty names indicate a bucket that no longer exists, this is non-fatal
+        // for multi-bucket API keys.
+        if b.Name == "" {
+            continue
+        }
+        // When requesting a specific bucket skip over non-matching names
+        if bucketName != "" && b.Name != bucketName {
+            continue
+        }
+
+        err := call(b.ID)
+        if err != nil {
+            return err
+        }
+    }
+
+    if len(f.info.APIs.Storage.Allowed.Buckets) == 0 {
+        err := call("")
+        if err != nil {
+            return err
+        }
+    }
+
     f.bucketIDMutex.Lock()
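The hunk above extracts the per-bucket request into a `call` closure so one code path serves both multi-bucket API keys and keys with no bucket list (via `call("")`). As a minimal, self-contained sketch of the same extract-then-reuse pattern, with invented names:

```go
package main

import "fmt"

// process applies fetch to each allowed ID, or once with "" when the
// allow-list is empty, mirroring the shape of listBucketsToFn above.
func process(allowed []string, fetch func(id string) error) error {
	for _, id := range allowed {
		if id == "" {
			continue // stale entries are skipped, non-fatally
		}
		if err := fetch(id); err != nil {
			return err
		}
	}
	if len(allowed) == 0 {
		return fetch("") // unrestricted key: a single call without an ID
	}
	return nil
}

func main() {
	_ = process([]string{"bucket-a", "", "bucket-b"}, func(id string) error {
		fmt.Println("listing", id)
		return nil
	})
}
```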
backend/drime/api/types.go (new file, 237 lines)
@@ -0,0 +1,237 @@
// Package api has type definitions for drime
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"encoding/json"
	"fmt"
	"time"
)

// Types of things in Item
const (
	ItemTypeFolder = "folder"
)

// User information
type User struct {
	Email            string      `json:"email"`
	ID               json.Number `json:"id"`
	Avatar           string      `json:"avatar"`
	ModelType        string      `json:"model_type"`
	OwnsEntry        bool        `json:"owns_entry"`
	EntryPermissions []any       `json:"entry_permissions"`
	DisplayName      string      `json:"display_name"`
}

// Permissions for a file
type Permissions struct {
	FilesUpdate   bool `json:"files.update"`
	FilesCreate   bool `json:"files.create"`
	FilesDownload bool `json:"files.download"`
	FilesDelete   bool `json:"files.delete"`
}

// Item describes a folder or a file as returned by /drive/file-entries
type Item struct {
	ID           json.Number `json:"id"`
	Name         string      `json:"name"`
	Description  any         `json:"description"`
	FileName     string      `json:"file_name"`
	Mime         string      `json:"mime"`
	Color        any         `json:"color"`
	Backup       bool        `json:"backup"`
	Tracked      int         `json:"tracked"`
	FileSize     int64       `json:"file_size"`
	UserID       json.Number `json:"user_id"`
	ParentID     json.Number `json:"parent_id"`
	CreatedAt    time.Time   `json:"created_at"`
	UpdatedAt    time.Time   `json:"updated_at"`
	DeletedAt    any         `json:"deleted_at"`
	IsDeleted    int         `json:"is_deleted"`
	Path         string      `json:"path"`
	DiskPrefix   any         `json:"disk_prefix"`
	Type         string      `json:"type"`
	Extension    any         `json:"extension"`
	FileHash     any         `json:"file_hash"`
	Public       bool        `json:"public"`
	Thumbnail    bool        `json:"thumbnail"`
	MuxStatus    any         `json:"mux_status"`
	ThumbnailURL any         `json:"thumbnail_url"`
	WorkspaceID  int         `json:"workspace_id"`
	IsEncrypted  int         `json:"is_encrypted"`
	Iv           any         `json:"iv"`
	VaultID      any         `json:"vault_id"`
	OwnerID      int         `json:"owner_id"`
	Hash         string      `json:"hash"`
	URL          string      `json:"url"`
	Users        []User      `json:"users"`
	Tags         []any       `json:"tags"`
	Permissions  Permissions `json:"permissions"`
}

// Listing response
type Listing struct {
	CurrentPage int    `json:"current_page"`
	Data        []Item `json:"data"`
	From        int    `json:"from"`
	LastPage    int    `json:"last_page"`
	NextPage    int    `json:"next_page"`
	PerPage     int    `json:"per_page"`
	PrevPage    int    `json:"prev_page"`
	To          int    `json:"to"`
	Total       int    `json:"total"`
}

// UploadResponse for a file
type UploadResponse struct {
	Status    string `json:"status"`
	FileEntry Item   `json:"fileEntry"`
}

// CreateFolderRequest for a folder
type CreateFolderRequest struct {
	Name     string      `json:"name"`
	ParentID json.Number `json:"parentId,omitempty"`
}

// CreateFolderResponse for a folder
type CreateFolderResponse struct {
	Status string `json:"status"`
	Folder Item   `json:"folder"`
}

// Error is returned from drime when things go wrong
type Error struct {
	Message string `json:"message"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
	out := fmt.Sprintf("Error %q", e.Message)
	return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// DeleteRequest is the input to DELETE /file-entries
type DeleteRequest struct {
	EntryIDs      []string `json:"entryIds"`
	DeleteForever bool     `json:"deleteForever"`
}

// DeleteResponse is returned by DELETE /file-entries
type DeleteResponse struct {
	Status  string            `json:"status"`
	Message string            `json:"message"`
	Errors  map[string]string `json:"errors"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /file-entries/{id}/
type UpdateItemRequest struct {
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`
}

// UpdateItemResponse is returned by PUT /file-entries/{id}/
type UpdateItemResponse struct {
	Status    string `json:"status"`
	FileEntry Item   `json:"fileEntry"`
}

// MoveRequest is the input to /file-entries/move
type MoveRequest struct {
	EntryIDs      []string `json:"entryIds"`
	DestinationID string   `json:"destinationId"`
}

// MoveResponse is returned by POST /file-entries/move
type MoveResponse struct {
	Status  string `json:"status"`
	Entries []Item `json:"entries"`
}

// CopyRequest is the input to /file-entries/duplicate
type CopyRequest struct {
	EntryIDs      []string `json:"entryIds"`
	DestinationID string   `json:"destinationId"`
}

// CopyResponse is returned by POST /file-entries/duplicate
type CopyResponse struct {
	Status  string `json:"status"`
	Entries []Item `json:"entries"`
}

// MultiPartCreateRequest is the input of POST /s3/multipart/create
type MultiPartCreateRequest struct {
	Filename     string      `json:"filename"`
	Mime         string      `json:"mime"`
	Size         int64       `json:"size"`
	Extension    string      `json:"extension"`
	ParentID     json.Number `json:"parent_id"`
	RelativePath string      `json:"relativePath"`
}

// MultiPartCreateResponse is returned by POST /s3/multipart/create
type MultiPartCreateResponse struct {
	UploadID string `json:"uploadId"`
	Key      string `json:"key"`
}

// CompletedPart Type for completed parts when making a multipart upload.
type CompletedPart struct {
	ETag       string `json:"ETag"`
	PartNumber int32  `json:"PartNumber"`
}

// MultiPartGetURLsRequest is the input of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsRequest struct {
	UploadID    string `json:"uploadId"`
	Key         string `json:"key"`
	PartNumbers []int  `json:"partNumbers"`
}

// MultiPartGetURLsResponse is the result of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsResponse struct {
	URLs []struct {
		URL        string `json:"url"`
		PartNumber int32  `json:"partNumber"`
	} `json:"urls"`
}

// MultiPartCompleteRequest is the input to POST /s3/multipart/complete
type MultiPartCompleteRequest struct {
	UploadID string          `json:"uploadId"`
	Key      string          `json:"key"`
	Parts    []CompletedPart `json:"parts"`
}

// MultiPartCompleteResponse is the result of POST /s3/multipart/complete
type MultiPartCompleteResponse struct {
	Location string `json:"location"`
}

// MultiPartEntriesRequest is the input to POST /s3/entries
type MultiPartEntriesRequest struct {
	ClientMime      string      `json:"clientMime"`
	ClientName      string      `json:"clientName"`
	Filename        string      `json:"filename"`
	Size            int64       `json:"size"`
	ClientExtension string      `json:"clientExtension"`
	ParentID        json.Number `json:"parent_id"`
	RelativePath    string      `json:"relativePath"`
}

// MultiPartEntriesResponse is the result of POST /s3/entries
type MultiPartEntriesResponse struct {
	FileEntry Item `json:"fileEntry"`
}

// MultiPartAbort is the input of POST /s3/multipart/abort
type MultiPartAbort struct {
	UploadID string `json:"uploadId"`
	Key      string `json:"key"`
}
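As an illustration of how these types fit together, here is a minimal sketch of decoding one page of a `/drive/file-entries` listing. The JSON payload is invented for the example and is not taken from the Drime API docs:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/drime/api"
)

func main() {
	// Invented example payload, shaped like api.Listing; not real Drime output.
	payload := []byte(`{
		"current_page": 1,
		"data": [{"id": 123, "name": "report.pdf", "type": "file", "file_size": 2048}],
		"last_page": 1,
		"per_page": 1000,
		"total": 1
	}`)

	var page api.Listing
	if err := json.Unmarshal(payload, &page); err != nil {
		panic(err)
	}
	for _, item := range page.Data {
		// json.Number keeps large IDs exact instead of rounding through float64
		fmt.Printf("%s (id=%s, %d bytes, folder=%v)\n",
			item.Name, item.ID.String(), item.FileSize, item.Type == api.ItemTypeFolder)
	}
}
```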
backend/drime/drime.go (new file, 1564 lines)
File diff suppressed because it is too large.
backend/drime/drime_test.go (new file, 33 lines)
@@ -0,0 +1,33 @@
// Drime filesystem interface
package drime

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDrime:",
		NilObject:  (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize: minChunkSize,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
	_ fstests.SetUploadCutoffer   = (*Fs)(nil)
)
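To run this integration test locally you would first define a `TestDrime:` remote in your rclone config, then invoke the test from the backend directory with the fstest `-remote` flag, per rclone's usual test convention:

```console
cd backend/drime
go test -v -remote TestDrime:
```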
@@ -346,9 +346,26 @@ can't check the size and hash but the file contents will be decompressed.
     Advanced: true,
     Default:  false,
 }, {
-    Name: "endpoint",
-    Help: "Endpoint for the service.\n\nLeave blank normally.",
+    Name: "endpoint",
+    Help: `Custom endpoint for the storage API. Leave blank to use the provider default.
+
+When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint),
+the subpath will be ignored during upload operations due to a limitation in the
+underlying Google API Go client library.
+Download and listing operations will work correctly with the full endpoint path.
+If you require subpath support for uploads, avoid using subpaths in your custom
+endpoint configuration.`,
     Advanced: true,
+    Examples: []fs.OptionExample{{
+        Value: "storage.example.org",
+        Help:  "Specify a custom endpoint",
+    }, {
+        Value: "storage.example.org:4443",
+        Help:  "Specifying a custom endpoint with port",
+    }, {
+        Value: "storage.example.org:4443/gcs/api",
+        Help:  "Specifying a subpath, see the note, uploads won't use the custom path!",
+    }},
 }, {
     Name: config.ConfigEncoding,
     Help: config.ConfigEncodingHelp,
@@ -72,7 +72,7 @@ func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadPara
     response := &UploadResult{}

-    formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
+    formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName, "application/octet-stream")

     if err != nil {
         return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
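The signature change threaded through this and the following hunks adds a trailing content-type argument to `rest.MultipartUpload`, so callers now state the MIME type of the file part explicitly. A hedged sketch of calling the updated helper, with a hypothetical wrapper function and invented names, might look like:

```go
package example

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"net/url"

	"github.com/rclone/rclone/lib/rest"
)

// uploadForm posts data as a multipart form. This wrapper is illustrative
// only; it exists to show the extra content-type argument in context.
func uploadForm(ctx context.Context, client *http.Client, uploadURL string, data []byte) error {
	formReader, contentType, _, err := rest.MultipartUpload(
		ctx,
		bytes.NewReader(data),
		url.Values{},               // extra form fields, none here
		"file",                     // form field name for the file part
		"example.txt",              // file name reported in the part header
		"application/octet-stream", // part content type, the new trailing argument
	)
	if err != nil {
		return fmt.Errorf("failed to make multipart upload: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, formReader)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", contentType) // includes the generated boundary
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}
```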
@@ -1327,7 +1327,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     // opts.Body=0), so upload it as a multipart form POST with
     // Content-Length set.
     if size == 0 {
-        formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
+        formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf, opts.ContentType)
         if err != nil {
             return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
         }
@@ -1384,7 +1384,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
     for i := range iVal.NumField() {
         params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
     }
-    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
+    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name, "application/octet-stream")
     if err != nil {
         return fmt.Errorf("failed to make multipart upload: %w", err)
     }
@@ -2,7 +2,17 @@ name: Selectel
|
||||
description: Selectel Object Storage
|
||||
region:
|
||||
ru-1: St. Petersburg
|
||||
ru-3: St. Petersburg
|
||||
ru-7: Moscow
|
||||
gis-1: Moscow
|
||||
kz-1: Kazakhstan
|
||||
uz-2: Uzbekistan
|
||||
endpoint:
|
||||
s3.ru-1.storage.selcloud.ru: Saint Petersburg
|
||||
s3.ru-1.storage.selcloud.ru: St. Petersburg
|
||||
s3.ru-3.storage.selcloud.ru: St. Petersburg
|
||||
s3.ru-7.storage.selcloud.ru: Moscow
|
||||
s3.gis-1.storage.selcloud.ru: Moscow
|
||||
s3.kz-1.storage.selcloud.ru: Kazakhstan
|
||||
s3.uz-2.storage.selcloud.ru: Uzbekistan
|
||||
quirks:
|
||||
list_url_encode: false
|
||||
|
||||
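For reference, a minimal rclone config entry using one of the newly documented Selectel regions might look like the following. The remote name, placeholder keys, and region choice are illustrative:

```ini
[selectel]
type = s3
provider = Selectel
access_key_id = XXXX
secret_access_key = YYYY
region = ru-7
endpoint = s3.ru-7.storage.selcloud.ru
```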
@@ -688,7 +688,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
         "need_idx_progress": {"true"},
         "replace":           {"1"},
     }
-    formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
+    formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename), "application/octet-stream")
     if err != nil {
         return nil, fmt.Errorf("failed to make multipart upload: %w", err)
     }
backend/shade/api/types.go (new file, 27 lines)
@@ -0,0 +1,27 @@
// Package api has type definitions for shade
package api

// ListDirResponse -------------------------------------------------
// Format from shade api
type ListDirResponse struct {
	Type  string `json:"type"`  // "file" or "tree"
	Path  string `json:"path"`  // Full path including root
	Ino   int    `json:"ino"`   // inode number
	Mtime int64  `json:"mtime"` // Modified time in milliseconds
	Ctime int64  `json:"ctime"` // Created time in milliseconds
	Size  int64  `json:"size"`  // Size in bytes
	Hash  string `json:"hash"`  // MD5 hash
	Draft bool   `json:"draft"` // Whether this is a draft file
}

// PartURL Type for multipart upload/download
type PartURL struct {
	URL     string            `json:"url"`
	Headers map[string]string `json:"headers,omitempty"`
}

// CompletedPart Type for completed parts when making a multipart upload.
type CompletedPart struct {
	ETag       string
	PartNumber int32
}
backend/shade/shade.go (new file, 1017 lines)
File diff suppressed because it is too large.
backend/shade/shade_test.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package shade_test

import (
	"testing"

	"github.com/rclone/rclone/backend/shade"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	name := "TestShade"
	fstests.Run(t, &fstests.Opt{
		RemoteName:      name + ":",
		NilObject:       (*shade.Object)(nil),
		SkipInvalidUTF8: true,
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "eventually_consistent_delay", Value: "7"},
		},
	})
}
backend/shade/upload.go (new file, 336 lines)
@@ -0,0 +1,336 @@
// multipart upload for shade

package shade

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"sort"
	"sync"

	"github.com/rclone/rclone/backend/shade/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunksize"
	"github.com/rclone/rclone/lib/multipart"
	"github.com/rclone/rclone/lib/rest"
)

var warnStreamUpload sync.Once

type shadeChunkWriter struct {
	initToken        string
	chunkSize        int64
	size             int64
	f                *Fs
	o                *Object
	completedParts   []api.CompletedPart
	completedPartsMu sync.Mutex
}

// uploadMultipart handles multipart upload for larger files
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
	chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
		Open:        o.fs,
		OpenOptions: options,
	})
	if err != nil {
		return err
	}

	var shadeWriter = chunkWriter.(*shadeChunkWriter)
	o.size = shadeWriter.size
	return nil
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
	}

	uploadParts := f.opt.MaxUploadParts
	if uploadParts < 1 {
		uploadParts = 1
	} else if uploadParts > maxUploadParts {
		uploadParts = maxUploadParts
	}
	size := src.Size()
	fs.FixRangeOption(options, size)

	// calculate size of parts
	chunkSize := f.opt.ChunkSize

	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
	// buffers here (default 64 MB). With a maximum number of parts (10,000) this will be a file of
	// 640 GB.
	if size == -1 {
		warnStreamUpload.Do(func() {
			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
				chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
		})
	} else {
		chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
	}

	token, err := o.fs.refreshJWTToken(ctx)
	if err != nil {
		return info, nil, fmt.Errorf("failed to get token: %w", err)
	}

	err = f.ensureParentDirectories(ctx, remote)
	if err != nil {
		return info, nil, fmt.Errorf("failed to ensure parent directories: %w", err)
	}

	fullPath := remote
	if f.root != "" {
		fullPath = path.Join(f.root, remote)
	}

	// Initiate multipart upload
	type initRequest struct {
		Path     string `json:"path"`
		PartSize int64  `json:"partSize"`
	}
	reqBody := initRequest{
		Path:     fullPath,
		PartSize: int64(chunkSize),
	}

	var initResp struct {
		Token string `json:"token"`
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart", o.fs.drive),
		RootURL: o.fs.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
		Options: options,
	}

	err = o.fs.pacer.Call(func() (bool, error) {
		res, err := o.fs.srv.CallJSON(ctx, &opts, reqBody, &initResp)
		if err != nil {
			return res != nil && res.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err)
	}

	chunkWriter := &shadeChunkWriter{
		initToken: initResp.Token,
		chunkSize: int64(chunkSize),
		size:      size,
		f:         f,
		o:         o,
	}
	info = fs.ChunkWriterInfo{
		ChunkSize:         int64(chunkSize),
		Concurrency:       f.opt.Concurrency,
		LeavePartsOnError: false,
	}
	return info, chunkWriter, err
}

// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return 0, err
	}

	// Read chunk
	var chunk bytes.Buffer
	n, err := io.Copy(&chunk, reader)

	if n == 0 {
		return 0, nil
	}

	if err != nil {
		return 0, fmt.Errorf("failed to read chunk: %w", err)
	}
	// Get presigned URL for this part
	var partURL api.PartURL

	partOpts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart/part/%d?token=%s", s.f.drive, chunkNumber+1, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.CallJSON(ctx, &partOpts, nil, &partURL)
		if err != nil {
			return res != nil && res.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return 0, fmt.Errorf("failed to get part URL: %w", err)
	}
	opts := rest.Opts{
		Method:        "PUT",
		RootURL:       partURL.URL,
		Body:          &chunk,
		ContentType:   "",
		ContentLength: &n,
	}

	// Add headers
	var uploadRes *http.Response
	if len(partURL.Headers) > 0 {
		opts.ExtraHeaders = make(map[string]string)
		for k, v := range partURL.Headers {
			opts.ExtraHeaders[k] = v
		}
	}

	err = s.f.pacer.Call(func() (bool, error) {
		uploadRes, err = s.f.srv.Call(ctx, &opts)
		if err != nil {
			return uploadRes != nil && uploadRes.StatusCode == http.StatusTooManyRequests, err
		}
		return false, nil
	})

	if err != nil {
		return 0, fmt.Errorf("failed to upload part %d: %w", chunkNumber+1, err)
	}

	if uploadRes.StatusCode != http.StatusOK && uploadRes.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(uploadRes.Body)
		fs.CheckClose(uploadRes.Body, &err)
		return 0, fmt.Errorf("part upload failed with status %d: %s", uploadRes.StatusCode, string(body))
	}

	// Get ETag from response
	etag := uploadRes.Header.Get("ETag")
	fs.CheckClose(uploadRes.Body, &err)

	s.completedPartsMu.Lock()
	defer s.completedPartsMu.Unlock()
	s.completedParts = append(s.completedParts, api.CompletedPart{
		PartNumber: int32(chunkNumber + 1),
		ETag:       etag,
	})
	return n, nil
}

// Close complete chunked writer finalising the file.
func (s *shadeChunkWriter) Close(ctx context.Context) error {
	// Complete multipart upload
	sort.Slice(s.completedParts, func(i, j int) bool {
		return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber
	})

	type completeRequest struct {
		Parts []api.CompletedPart `json:"parts"`
	}
	var completeBody completeRequest

	if s.completedParts == nil {
		completeBody = completeRequest{Parts: []api.CompletedPart{}}
	} else {
		completeBody = completeRequest{Parts: s.completedParts}
	}

	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return err
	}

	completeOpts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/multipart/complete?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	var response http.Response

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response)

		if err != nil && res == nil {
			return false, err
		}

		if res.StatusCode == http.StatusTooManyRequests {
			return true, err // Retry on 429
		}

		if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			body, _ := io.ReadAll(res.Body)
			return false, fmt.Errorf("complete multipart failed with status %d: %s", res.StatusCode, string(body))
		}

		return false, nil
	})

	if err != nil {
		return fmt.Errorf("failed to complete multipart upload: %w", err)
	}

	return nil
}

// Abort chunk write
//
// You can and should call Abort without calling Close.
func (s *shadeChunkWriter) Abort(ctx context.Context) error {
	token, err := s.f.refreshJWTToken(ctx)
	if err != nil {
		return err
	}

	opts := rest.Opts{
		Method:  "POST",
		Path:    fmt.Sprintf("/%s/upload/abort/multipart?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
		RootURL: s.f.endpoint,
		ExtraHeaders: map[string]string{
			"Authorization": "Bearer " + token,
		},
	}

	err = s.f.pacer.Call(func() (bool, error) {
		res, err := s.f.srv.Call(ctx, &opts)
		if err != nil {
			fs.Debugf(s.f, "Failed to abort multipart upload: %v", err)
			return false, nil // Don't retry abort
		}
		if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
			fs.Debugf(s.f, "Abort returned status %d", res.StatusCode)
		}
		return false, nil
	})
	if err != nil {
		return fmt.Errorf("failed to abort multipart upload: %w", err)
	}
	return nil
}
@@ -817,7 +817,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
     params.Set("filename", url.QueryEscape(name))
     params.Set("parent_id", parent)
     params.Set("override-name-exist", strconv.FormatBool(true))
-    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
+    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name, "application/octet-stream")
     if err != nil {
         return nil, fmt.Errorf("failed to make multipart upload: %w", err)
     }
@@ -43,6 +43,7 @@ docs = [
     "compress.md",
     "combine.md",
     "doi.md",
+    "drime.md",
     "dropbox.md",
     "filefabric.md",
     "filelu.md",
@@ -84,6 +85,7 @@ docs = [
     "protondrive.md",
     "seafile.md",
     "sftp.md",
+    "shade.md",
     "smb.md",
     "storj.md",
     "sugarsync.md",
@@ -153,7 +153,7 @@ func TestRun(t *testing.T) {
         fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error())
     }
     publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey)
-    if privateKeyErr != nil {
+    if publicKeyError != nil {
         fs.Fatal(nil, "error generating test public key "+publicKeyError.Error())
     }
@@ -45,6 +45,10 @@ var OptionsInfo = fs.Options{{
     Name:    "disable_dir_list",
     Default: false,
     Help:    "Disable HTML directory list on GET request for a directory",
+}, {
+    Name:    "disable_zip",
+    Default: false,
+    Help:    "Disable zip download of directories",
 }}.
     Add(libhttp.ConfigInfo).
     Add(libhttp.AuthConfigInfo).
@@ -57,6 +61,7 @@ type Options struct {
     Template       libhttp.TemplateConfig
     EtagHash       string `config:"etag_hash"`
     DisableDirList bool   `config:"disable_dir_list"`
+    DisableZip     bool   `config:"disable_zip"`
 }

 // Opt is options set by command line flags
@@ -408,6 +413,24 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
         return
     }
     dir := node.(*vfs.Dir)
+
+    if r.URL.Query().Get("download") == "zip" && !w.opt.DisableZip {
+        fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr)
+        zipName := path.Base(dirRemote)
+        if dirRemote == "" {
+            zipName = "root"
+        }
+        rw.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"")
+        rw.Header().Set("Content-Type", "application/zip")
+        rw.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
+        err := vfs.CreateZip(ctx, dir, rw)
+        if err != nil {
+            serve.Error(ctx, dirRemote, rw, "Failed to create zip", err)
+            return
+        }
+        return
+    }
+
     dirEntries, err := dir.ReadDirAll()

     if err != nil {
@@ -417,6 +440,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str

     // Make the entries for display
     directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate())
+    directory.DisableZip = w.opt.DisableZip
     for _, node := range dirEntries {
         if vfscommon.Opt.NoModTime {
             directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
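As a usage sketch of the new zip download (addresses and paths are illustrative): serve a remote over WebDAV, then request any directory with the `download=zip` query parameter; the `disable_zip` option added above turns the feature off.

```console
rclone serve webdav remote: --addr :8080 &
curl -o docs.zip 'http://localhost:8080/docs/?download=zip'
```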
@@ -128,6 +128,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
 {{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
 {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
+{{< provider name="Drime" home="https://www.drime.cloud/" config="/drime/" >}}
 {{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
 {{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
 {{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}
@@ -202,6 +203,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
 {{< provider name="Servercore Object Storage" home="https://servercore.com/services/object-storage/" config="/s3/#servercore" >}}
 {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
+{{< provider name="Shade" home="https://shade.inc" config="/shade/" >}}
 {{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
 {{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
 {{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}}
@@ -1050,3 +1050,11 @@ put them back in again. -->
 - Nikolay Kiryanov <nikolay@kiryanov.ru>
 - Diana <5275194+DianaNites@users.noreply.github.com>
 - Duncan Smart <duncan.smart@gmail.com>
+- vicerace <vicerace@sohu.com>
+- Cliff Frey <cliff@openai.com>
+- Vladislav Tropnikov <vtr.name@gmail.com>
+- Leo <i@hardrain980.com>
+- Johannes Rothe <mail@johannes-rothe.de>
+- Tingsong Xu <tingsong.xu@rightcapital.com>
+- Jonas Tingeborn <134889+jojje@users.noreply.github.com>
+- jhasse-shade <jacob@shade.inc>
@@ -6,6 +6,22 @@ description: "Rclone Changelog"

 # Changelog

+## v1.72.1 - 2025-12-10
+
+[See commits](https://github.com/rclone/rclone/compare/v1.72.0...v1.72.1)
+
+- Bug Fixes
+  - build: update to go1.25.5 to fix [CVE-2025-61729](https://pkg.go.dev/vuln/GO-2025-4155)
+  - doc fixes (Duncan Smart, Nick Craig-Wood)
+  - configfile: Fix piped config support (Jonas Tingeborn)
+  - log
+    - Fix PID not included in JSON log output (Tingsong Xu)
+    - Fix backtrace not going to the --log-file (Nick Craig-Wood)
+- Google Cloud Storage
+  - Improve endpoint parameter docs (Johannes Rothe)
+- S3
+  - Add missing regions for Selectel provider (Nick Craig-Wood)
+
 ## v1.72.0 - 2025-11-21

 [See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.72.0)
@@ -43,6 +43,7 @@ See the following for detailed instructions for
 - [Crypt](/crypt/) - to encrypt other remotes
 - [DigitalOcean Spaces](/s3/#digitalocean-spaces)
 - [Digi Storage](/koofr/#digi-storage)
+- [Drime](/drime/)
 - [Dropbox](/dropbox/)
 - [Enterprise File Fabric](/filefabric/)
 - [FileLu Cloud Storage](/filelu/)
@@ -82,6 +83,7 @@ See the following for detailed instructions for
 - [rsync.net](/sftp/#rsync-net)
 - [Seafile](/seafile/)
 - [SFTP](/sftp/)
+- [Shade](/shade/)
 - [Sia](/sia/)
 - [SMB](/smb/)
 - [Storj](/storj/)
docs/content/drime.md (new file, 236 lines)
@@ -0,0 +1,236 @@
---
title: "Drime"
description: "Rclone docs for Drime"
versionIntroduced: "v1.73"
---

# {{< icon "fa fa-cloud" >}} Drime

[Drime](https://drime.cloud/) is a cloud storage and transfer service focused
on fast, resilient file delivery. It offers both free and paid tiers with
emphasis on high-speed uploads and link sharing.

To set up Drime you need to log in, go to Settings > Developer, and create a
token to use as an API access key. Give it a sensible name and copy the token
for use in the config.

## Configuration

Here is a run through of `rclone config` to make a remote called `Drime`.

Firstly run:

```console
rclone config
```

Then follow through the interactive setup:

```text
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> Drime

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
XX / Drime
   \ (drime)
Storage> drime

Option access_token.
API Access token
You can get this from the web control panel.
Enter a value. Press Enter to leave empty.
access_token> YOUR_API_ACCESS_TOKEN

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: drime
- access_token: YOUR_API_ACCESS_TOKEN
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Once configured you can then use `rclone` like this (replace `remote` with the
name you gave your remote):

List directories and files in the top level of your Drime

```console
rclone lsf remote:
```

To copy a local directory to a Drime directory called backup

```console
rclone copy /home/source remote:backup
```

### Modification times and hashes

Drime does not support modification times or hashes.

### Restricted filename characters

In addition to the [default restricted characters set](/overview/#restricted-characters)
the following characters are also replaced:

| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| \         | 0x5C  | ＼           |

File names can also not start or end with the following characters.
These only get replaced if they are the last character in the name:

| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| SP        | 0x20  | ␠           |

Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.

### Root folder ID

You can set the `root_folder_id` for rclone. This is the directory
(identified by its `Folder ID`) that rclone considers to be the root
of your Drime drive.

Normally you will leave this blank and rclone will determine the
correct root to use itself and fill in the value in the config file.

However you can set this to restrict rclone to a specific folder
hierarchy.

In order to do this you will have to find the `Folder ID` of the
directory you wish rclone to display.

You can do this with rclone

```console
$ rclone lsf -Fip --dirs-only remote:
d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/
f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/
d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/
```

The ID to use is the part before the `;` so you could set

```text
root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0
```

to restrict rclone to the `Files` directory.

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/drime/drime.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

Here are the Standard options specific to drime (Drime).

#### --drime-access-token

API Access token

You can get this from the web control panel.

Properties:

- Config: access_token
- Env Var: RCLONE_DRIME_ACCESS_TOKEN
- Type: string
- Required: false

### Advanced options

Here are the Advanced options specific to drime (Drime).

#### --drime-root-folder-id

ID of the root folder

Leave this blank normally, rclone will fill it in automatically.

If you want rclone to be restricted to a particular folder you can
fill it in - see the docs for more info.

Properties:

- Config: root_folder_id
- Env Var: RCLONE_DRIME_ROOT_FOLDER_ID
- Type: string
- Required: false

#### --drime-workspace-id

Account ID

Leave this blank normally, rclone will fill it in automatically.

Properties:

- Config: workspace_id
- Env Var: RCLONE_DRIME_WORKSPACE_ID
- Type: string
- Required: false

#### --drime-list-chunk

Number of items to list in each call

Properties:

- Config: list_chunk
- Env Var: RCLONE_DRIME_LIST_CHUNK
- Type: int
- Default: 1000

#### --drime-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_DRIME_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot

#### --drime-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_DRIME_DESCRIPTION
- Type: string
- Required: false

<!-- autogenerated options stop -->

## Limitations

Drime only supports filenames up to 255 bytes in length when encoded
as UTF-8.
@@ -23,6 +23,7 @@ Here is an overview of the major features of each cloud storage system.
 | Box | SHA1 | R/W | Yes | No | - | - |
 | Citrix ShareFile | MD5 | R/W | Yes | No | - | - |
 | Cloudinary | MD5 | R | No | Yes | - | - |
+| Drime | - | - | No | No | R/W | - |
 | Dropbox | DBHASH ¹ | R | Yes | No | - | - |
 | Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
 | FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - |
@@ -59,6 +60,7 @@ Here is an overview of the major features of each cloud storage system.
 | Quatrix by Maytech | - | R/W | No | No | - | - |
 | Seafile | - | - | No | No | - | - |
 | SFTP | MD5, SHA1 ² | DR/W | Depends | No | - | - |
+| Shade | - | - | Yes | No | - | - |
 | Sia | - | - | No | No | - | - |
 | SMB | - | R/W | Yes | No | - | - |
 | SugarSync | - | - | No | No | - | - |
@@ -514,6 +516,7 @@ upon backend-specific capabilities.
 | Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
 | Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
 | Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
+| Drime | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes |
 | Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
 | Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No |
 | Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
docs/content/shade.md (new file, 218 lines)
@@ -0,0 +1,218 @@
# {{< icon "fa fa-moon" >}} Shade

This is a backend for the [Shade](https://shade.inc/) platform.

## About Shade

[Shade](https://shade.inc/) is an AI-powered cloud NAS that makes your cloud files behave like a local drive, optimized for media and creative workflows. It provides fast, secure access with natural-language search, easy sharing, and scalable cloud storage.

## Accounts & Pricing

To use this backend, you need to [create a free account](https://app.shade.inc/) on Shade. The free tier includes 20GB of storage.

## Usage

Paths are specified as `remote:path`

Paths may be as deep as required, e.g. `remote:directory/subdirectory`.

## Configuration

Here is an example of making a Shade configuration.

First, [create a free account](https://app.shade.inc/) and choose a plan.

You will need to log in and get the `API Key` from your account settings and the `Drive ID` from the settings of the drive you created.

Now run

`rclone config`

Follow this interactive process:

```sh
$ rclone config
e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> n

Enter name for new remote.
name> Shade

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[OTHER OPTIONS]
xx / Shade FS
   \ (shade)
[OTHER OPTIONS]
Storage> xx

Option drive_id.
The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.
Enter a value.
drive_id> [YOUR_ID]

Option api_key.
An API key for your account.
Enter a value.
api_key> [YOUR_API_KEY]

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: shade
- drive_id: [YOUR_ID]
- api_key: [YOUR_API_KEY]
Keep this "Shade" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

### Modification times and hashes

Shade does not support hashes or setting modification times.

### Transfers

Shade uses multipart uploads by default. This means that files will be chunked and sent up to Shade concurrently. In order to configure how many simultaneous uploads you want to use, update the 'concurrency' option in the advanced config section. Note that this uses more memory and initiates more http requests.

### Deleting files

Please note that when deleting files in Shade via rclone it will delete the file instantly, instead of sending it to the trash. This means that it will not be recoverable.

{{< rem "autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/shade/shade.go then run make backenddocs" >}}
### Standard options

Here are the Standard options specific to shade (Shade FS).

#### --shade-drive-id

The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.

Properties:

- Config: drive_id
- Env Var: RCLONE_SHADE_DRIVE_ID
- Type: string
- Required: true

#### --shade-api-key

An API key for your account. You can find this under Settings > API Keys

Properties:

- Config: api_key
- Env Var: RCLONE_SHADE_API_KEY
- Type: string
- Required: true

### Advanced options

Here are the Advanced options specific to shade (Shade FS).

#### --shade-endpoint

Endpoint for the service.

Leave blank normally.

Properties:

- Config: endpoint
- Env Var: RCLONE_SHADE_ENDPOINT
- Type: string
- Required: false

#### --shade-chunk-size

Chunk size to use for uploading.

Any files larger than this will be uploaded in chunks of this size.

Note that this is stored in memory per transfer, so increasing it will
increase memory usage.

Minimum is 5MB, maximum is 5GB.

Properties:

- Config: chunk_size
- Env Var: RCLONE_SHADE_CHUNK_SIZE
- Type: SizeSuffix
- Default: 64Mi

#### --shade-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_SHADE_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot

#### --shade-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_SHADE_DESCRIPTION
- Type: string
- Required: false

{{< rem autogenerated options stop >}}

## Limitations

Note that Shade is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".

Shade only supports filenames up to 255 characters in length.

`rclone about` is not supported by the Shade backend. Backends without
this capability cannot determine free space for an rclone mount or
use policy `mfs` (most free space) as a member of an rclone union
remote.

See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)

## Backend commands

Here are the commands specific to the shade backend.

Run them with

    rclone backend COMMAND remote:

The help below will explain what arguments each command takes.

See the [backend](/commands/rclone_backend/) command for more
info on how to pass options and arguments.

These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).
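As a usage sketch (remote name illustrative), the documented advanced options let you tune the multipart transfer behaviour from the command line, e.g. a larger chunk size for big media files at the cost of more memory per transfer:

```console
rclone copy /media/raw-footage shade: --shade-chunk-size 128Mi --transfers 4
```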
@@ -66,6 +66,7 @@
 <a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square fa-fw"></i> Citrix ShareFile</a>
 <a class="dropdown-item" href="/crypt/"><i class="fa fa-lock fa-fw"></i> Crypt (encrypts the others)</a>
 <a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a>
+<a class="dropdown-item" href="/drime/"><i class="fab fa-cloud fa-fw"></i> Drime</a>
 <a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
 <a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
 <a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a>
@@ -107,6 +108,7 @@
 <a class="dropdown-item" href="/seafile/"><i class="fa fa-server fa-fw"></i> Seafile</a>
 <a class="dropdown-item" href="/sftp/"><i class="fa fa-server fa-fw"></i> SFTP</a>
 <a class="dropdown-item" href="/sia/"><i class="fa fa-globe fa-fw"></i> Sia</a>
+<a class="dropdown-item" href="/shade/"><i class="fa fa-moon fa-fw"></i> Shade</a>
 <a class="dropdown-item" href="/smb/"><i class="fa fa-server fa-fw"></i> SMB / CIFS</a>
 <a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
 <a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>
@@ -2,6 +2,7 @@ package configfile

 import (
     "fmt"
     "io"
     "os"
     "path/filepath"
     "runtime"
@@ -362,3 +363,39 @@ func TestConfigFileSaveSymlinkAbsolute(t *testing.T) {
         testSymlink(t, link, target, resolvedTarget)
     })
 }
+
+type pipedInput struct {
+    io.Reader
+}
+
+func (p *pipedInput) Read(b []byte) (int, error) {
+    return p.Reader.Read(b)
+}
+
+func (*pipedInput) Seek(int64, int) (int64, error) {
+    return 0, fmt.Errorf("Seek not supported")
+}
+
+func TestPipedConfig(t *testing.T) {
+    t.Run("DoesNotSupportSeeking", func(t *testing.T) {
+        r := &pipedInput{strings.NewReader("")}
+        _, err := r.Seek(0, io.SeekStart)
+        require.Error(t, err)
+    })
+
+    t.Run("IsSupported", func(t *testing.T) {
+        r := &pipedInput{strings.NewReader(configData)}
+        _, err := config.Decrypt(r)
+        require.NoError(t, err)
+    })
+
+    t.Run("PlainTextConfigIsNotConsumedByCryptCheck", func(t *testing.T) {
+        in := &pipedInput{strings.NewReader(configData)}
+
+        r, _ := config.Decrypt(in)
+        got, err := io.ReadAll(r)
+        require.NoError(t, err)
+
+        assert.Equal(t, configData, string(got))
+    })
+}
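These tests pin down the behaviour fixed in the `Decrypt` hunk below: peeking at the first line of a piped (non-seekable) config must not consume it. As a hedged usage sketch, assuming your platform exposes the pipe as `/dev/stdin`:

```console
cat rclone.conf | rclone --config /dev/stdin listremotes
```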
@@ -77,8 +77,9 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
	if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
		return nil, errors.New("unsupported configuration encryption - update rclone for support")
	}
	// Restore non-seekable plain-text stream to its original state
	if _, err := b.Seek(0, io.SeekStart); err != nil {
		return nil, err
		return io.MultiReader(strings.NewReader(l+"\n"), r), nil
	}
	return b, nil
}
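The change above keeps the seek-based rewind for seekable files and, where rewinding fails (piped input), stitches the already-consumed first line back onto the stream instead of erroring out. A minimal self-contained sketch of that pattern (function and variable names invented here, not rclone's):

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
)

// peekFirstLine reads the first line of b. If b can seek, it rewinds and
// returns b unchanged; otherwise it reattaches the consumed line with
// io.MultiReader so the caller still sees the whole stream.
func peekFirstLine(b io.ReadSeeker) (string, io.Reader, error) {
	r := bufio.NewReader(b)
	l, err := r.ReadString('\n')
	if err != nil && err != io.EOF {
		return "", nil, err
	}
	line := strings.TrimSuffix(l, "\n")
	if _, err := b.Seek(0, io.SeekStart); err != nil {
		// Non-seekable (e.g. piped) input: restore what we consumed.
		return line, io.MultiReader(strings.NewReader(l), r), nil
	}
	return line, b, nil
}

func main() {
	in := strings.NewReader("first\nsecond\n")
	line, rest, err := peekFirstLine(in)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("peeked: %q\n", line)
	io.Copy(os.Stdout, rest) // prints the full original stream
}
```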
@@ -209,7 +209,7 @@ func InitLogging() {
	// Log file output
	if Opt.File != "" {
		var w io.Writer
		if Opt.MaxSize == 0 {
		if Opt.MaxSize < 0 {
			// No log rotation - just open the file as normal
			// We'll capture tracebacks like this too.
			f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)
@@ -310,6 +310,10 @@ func (h *OutputHandler) jsonLog(ctx context.Context, buf *bytes.Buffer, r slog.R
	r.AddAttrs(
		slog.String("source", getCaller(2)),
	)
	// Add PID if requested
	if h.format&logFormatPid != 0 {
		r.AddAttrs(slog.Int("pid", os.Getpid()))
	}
	h.mu.Lock()
	err = h.jsonHandler.Handle(ctx, r)
	if err == nil {
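For reference, the same record tagging can be reproduced with nothing but the standard library; a minimal sketch (no rclone types involved):

```go
package main

import (
	"context"
	"log/slog"
	"os"
	"time"
)

func main() {
	// Build a record by hand, tag it with the process ID, and hand it
	// to a plain stdlib JSON handler.
	h := slog.NewJSONHandler(os.Stdout, nil)
	r := slog.NewRecord(time.Now(), slog.LevelInfo, "hello", 0)
	r.AddAttrs(slog.Int("pid", os.Getpid()))
	_ = h.Handle(context.Background(), r)
}
```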
@@ -198,6 +198,17 @@ func TestAddOutputUseJSONLog(t *testing.T) {
	assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText)
}

// Test JSON log includes PID when logFormatPid is set.
func TestJSONLogWithPid(t *testing.T) {
	buf := &bytes.Buffer{}
	h := NewOutputHandler(buf, nil, logFormatJSON|logFormatPid)

	r := slog.NewRecord(t0, slog.LevelInfo, "hello", 0)
	require.NoError(t, h.Handle(context.Background(), r))
	output := buf.String()
	assert.Contains(t, output, fmt.Sprintf(`"pid":%d`, os.Getpid()))
}

// Test WithAttrs and WithGroup return new handlers with same settings.
func TestWithAttrsAndGroup(t *testing.T) {
	buf := &bytes.Buffer{}
@@ -561,7 +561,7 @@ func TestUploadFile(t *testing.T) {
		assert.NoError(t, currentFile.Close())
	}()

	formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName)
	formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName, "application/octet-stream")
	require.NoError(t, err)

	httpReq := httptest.NewRequest("POST", "/", formReader)
@@ -587,7 +587,7 @@ func TestUploadFile(t *testing.T) {
		assert.NoError(t, currentFile2.Close())
	}()

	formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName)
	formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName, "application/octet-stream")
	require.NoError(t, err)

	httpReq = httptest.NewRequest("POST", "/", formReader)
@@ -1301,6 +1301,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
	err := Sync(ctx, r.Fremote, r.Flocal, false)
	assert.Equal(t, fs.ErrorNotDeleting, err)
	testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
	accounting.GlobalStats().ResetCounters()

	r.CheckLocalListing(
		t,
@@ -13,6 +13,7 @@ import (

	_ "github.com/rclone/rclone/backend/all"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
@@ -507,6 +508,7 @@ func TestError(t *testing.T) {
	err = Sync(ctx, r.Fremote, r.Flocal, true)
	// testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
	assert.Error(t, err)
	accounting.GlobalStats().ResetCounters()

	r.CheckLocalListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
	r.CheckRemoteListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
@@ -662,6 +662,10 @@ backends:
    ignoretests:
      - cmd/bisync
      - cmd/gitannex
  - backend: "shade"
    remote: "TestShade:"
    fastlist: false

  - backend: "archive"
    remote: "TestArchive:"
    fastlist: false
@@ -673,3 +677,8 @@ backends:
      # with the parent backend having a different precision.
      - TestServerSideCopyOverSelf
      - TestServerSideMoveOverSelf
  - backend: "drime"
    remote: "TestDrime:"
    ignoretests:
      - TestBisyncRemoteLocal/check_access_filters
    fastlist: false
@@ -361,9 +361,6 @@ func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, e
	} else if dc.rootID == dc.trueRootID {
		return "", errors.New("is root directory")
	}
	if dc.rootParentID == "" {
		return "", errors.New("internal error: didn't find rootParentID")
	}
	return dc.rootParentID, nil
}
@@ -14,6 +14,7 @@ import (
	"maps"
	"mime/multipart"
	"net/http"
	"net/textproto"
	"net/url"
	"sync"
@@ -145,6 +146,7 @@ type Opts struct {
	MultipartMetadataName string       // ..this is used for the name of the metadata form part if set
	MultipartContentName  string       // ..name of the parameter which is the attached file
	MultipartFileName     string       // ..name of the file for the attached file
	MultipartContentType  string       // ..content type of the attached file
	Parameters            url.Values   // any parameters for the final URL
	TransferEncoding      []string     // transfer encoding, set to "identity" to disable chunked encoding
	Trailer               *http.Header // set the request trailer
@@ -371,6 +373,17 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
	return resp, nil
}

// CreateFormFile is a convenience wrapper around [Writer.CreatePart]. It creates
// a new form-data header with the provided field name and file name.
func CreateFormFile(w *multipart.Writer, fieldname, filename, contentType string) (io.Writer, error) {
	h := make(textproto.MIMEHeader)
	h.Set("Content-Disposition", multipart.FileContentDisposition(fieldname, filename))
	if contentType != "" {
		h.Set("Content-Type", contentType)
	}
	return w.CreatePart(h)
}

// MultipartUpload creates an io.Reader which produces an encoded
// multipart form upload from the params passed in and the passed in
//
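A hedged usage sketch for the new helper (the call below mirrors the signature added above; the field name, file name, and content type are invented for illustration):

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"

	"github.com/rclone/rclone/lib/rest"
)

func main() {
	buf := &bytes.Buffer{}
	w := multipart.NewWriter(buf)
	// File part with an explicit Content-Type instead of the
	// application/octet-stream default of (*Writer).CreateFormFile.
	part, err := rest.CreateFormFile(w, "file", "hello.txt", "text/plain")
	if err != nil {
		panic(err)
	}
	fmt.Fprint(part, "hello world")
	_ = w.Close()
	fmt.Println(buf.String())
}
```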
@@ -382,10 +395,10 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
// the int64 returned is the overhead in addition to the file contents, in case Content-Length is required
//
// NB This doesn't allow setting the content type of the attachment
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) {
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string, contentType string) (io.ReadCloser, string, int64, error) {
	bodyReader, bodyWriter := io.Pipe()
	writer := multipart.NewWriter(bodyWriter)
	contentType := writer.FormDataContentType()
	formContentType := writer.FormDataContentType()

	// Create a Multipart Writer as base for calculating the Content-Length
	buf := &bytes.Buffer{}
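The rename from contentType to formContentType separates two distinct things: the multipart body's own Content-Type header (which carries the boundary) and the attached file's content type, which now arrives as a parameter. A small stdlib-only sketch of the former:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
)

func main() {
	buf := &bytes.Buffer{}
	w := multipart.NewWriter(buf)
	// The form's own content type (with boundary) is what goes into the
	// HTTP Content-Type header; each part can carry its own type.
	fmt.Println(w.FormDataContentType()) // multipart/form-data; boundary=...
	_ = w.Close()
}
```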
@@ -404,7 +417,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
		}
	}
	if in != nil {
		_, err = dummyMultipartWriter.CreateFormFile(contentName, fileName)
		_, err = CreateFormFile(dummyMultipartWriter, contentName, fileName, contentType)
		if err != nil {
			return nil, "", 0, err
		}
@@ -445,7 +458,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
	}

	if in != nil {
		part, err := writer.CreateFormFile(contentName, fileName)
		part, err := CreateFormFile(writer, contentName, fileName, contentType)
		if err != nil {
			_ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
			return
@@ -467,7 +480,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
		_ = bodyWriter.Close()
	}()

	return bodyReader, contentType, multipartLength, nil
	return bodyReader, formContentType, multipartLength, nil
}

// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
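Callers of MultipartUpload now supply the attachment's content type as the trailing argument, as the test changes earlier show. A hedged sketch of a typical call (package, function, and file names invented for illustration):

```go
package upload

import (
	"context"
	"io"
	"net/url"

	"github.com/rclone/rclone/lib/rest"
)

// buildUpload streams file as a multipart/form-data body, tagging the
// attachment with an explicit content type (pass "" to keep the
// multipart writer's default).
func buildUpload(ctx context.Context, file io.Reader) (io.ReadCloser, string, int64, error) {
	return rest.MultipartUpload(ctx, file, url.Values{}, "file", "upload.bin", "application/octet-stream")
}
```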
@@ -539,7 +552,7 @@ func (api *Client) callCodec(ctx context.Context, opts *Opts, request any, respo
	opts = opts.Copy()

	var overhead int64
	opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
	opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName, opts.MultipartContentType)
	if err != nil {
		return nil, err
	}