mirror of https://github.com/rclone/rclone.git
synced 2026-01-12 13:34:01 +00:00

Compare commits: fix-sftp-i...fix-9031-b (2 commits)

| Author | SHA1 | Date |
|--------|------|------|
|        | b5bf9b629f | |
|        | 6f81885ebf | |

@@ -28,19 +28,16 @@ directories to and from different cloud storage providers.
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Bizfly Cloud Simple Storage [:page_facing_up:](https://rclone.org/s3/#bizflycloud)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Cloudinary [:page_facing_up:](https://rclone.org/cloudinary/)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Drime [:page_facing_up:](https://rclone.org/s3/#drime)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)

@@ -16,7 +16,6 @@ import (
    _ "github.com/rclone/rclone/backend/compress"
    _ "github.com/rclone/rclone/backend/crypt"
    _ "github.com/rclone/rclone/backend/doi"
    _ "github.com/rclone/rclone/backend/drime"
    _ "github.com/rclone/rclone/backend/drive"
    _ "github.com/rclone/rclone/backend/dropbox"
    _ "github.com/rclone/rclone/backend/fichier"

@@ -133,32 +133,23 @@ type File struct {
    Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// StorageAPI is as returned from the b2_authorize_account call
type StorageAPI struct {
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
    AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
    AccountID               string `json:"accountId"`               // The identifier for the account.
    Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
        Buckets []struct { // When present, access is restricted to one or more buckets.
            ID   string `json:"id"`   // ID of bucket
            Name string `json:"name"` // When present, name of bucket - may be empty
        } `json:"buckets"`
        Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
        BucketID     string   `json:"bucketId"`     // When present, access is restricted to one bucket.
        BucketName   string   `json:"bucketName"`   // When present, name of bucket - may be empty
        Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
        NamePrefix   any      `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
    } `json:"allowed"`
    APIURL              string `json:"apiUrl"`              // The base URL to use for all API calls except for uploading and downloading files.
    AuthorizationToken  string `json:"authorizationToken"`  // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
    DownloadURL         string `json:"downloadUrl"`         // The base URL to use for downloading files.
    MinimumPartSize     int    `json:"minimumPartSize"`     // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
    RecommendedPartSize int    `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
    AccountID          string `json:"accountId"`          // The identifier for the account.
    AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
    APIs               struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
        Storage StorageAPI `json:"storageApi"`
    } `json:"apiInfo"`
}

// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
    AccountID string `json:"accountId"` // The identifier for the account.

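The hunk above restructures the authorize response: the storage endpoints and key restrictions move one level down, under `apiInfo.storageApi`. A minimal, self-contained sketch of consuming that nesting — not part of the diff; the JSON payload is invented from the struct tags above, with placeholder values:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed copies of the structs above - just enough to show the new nesting.
type StorageAPI struct {
    APIURL      string `json:"apiUrl"`
    DownloadURL string `json:"downloadUrl"`
}

type AuthorizeAccountResponse struct {
    AccountID          string `json:"accountId"`
    AuthorizationToken string `json:"authorizationToken"`
    APIs               struct {
        Storage StorageAPI `json:"storageApi"`
    } `json:"apiInfo"`
}

func main() {
    // Hypothetical payload, invented from the field tags above; the URLs
    // and tokens are placeholders, not real Backblaze values.
    payload := []byte(`{
        "accountId": "ACCOUNT_ID",
        "authorizationToken": "TOKEN",
        "apiInfo": {"storageApi": {
            "apiUrl": "https://api.example.com",
            "downloadUrl": "https://download.example.com"
        }}
    }`)
    var info AuthorizeAccountResponse
    if err := json.Unmarshal(payload, &info); err != nil {
        panic(err)
    }
    // The endpoints now live one level down, under apiInfo.storageApi.
    fmt.Println(info.APIs.Storage.APIURL, info.APIs.Storage.DownloadURL)
}
```
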
backend/b2/b2.go

@@ -607,29 +607,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if err != nil {
        return nil, fmt.Errorf("failed to authorize account: %w", err)
    }
    // If this is a key limited to one or more buckets, one of them must exist
    // and be ours.
    if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 {
        buckets := f.info.APIs.Storage.Allowed.Buckets
        var rootFound = false
        var rootID string
        for _, b := range buckets {
            allowedBucket := f.opt.Enc.ToStandardName(b.Name)
            if allowedBucket == "" {
                fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
                continue
            }

            if allowedBucket == f.rootBucket {
                rootFound = true
                rootID = b.ID
            }
    // If this is a key limited to a single bucket, it must exist already
    if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
        allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
        if allowedBucket == "" {
            return nil, errors.New("bucket that application key is restricted to no longer exists")
        }
        if !rootFound {
            return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets)
        if allowedBucket != f.rootBucket {
            return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
        }
        f.cache.MarkOK(f.rootBucket)
        f.setBucketID(f.rootBucket, rootID)
        f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
    }
    if f.rootBucket != "" && f.rootDirectory != "" {
        // Check to see if the (bucket,directory) is actually an existing file

@@ -655,7 +643,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
    defer f.authMu.Unlock()
    opts := rest.Opts{
        Method:   "GET",
        Path:     "/b2api/v4/b2_authorize_account",
        Path:     "/b2api/v1/b2_authorize_account",
        RootURL:  f.opt.Endpoint,
        UserName: f.opt.Account,
        Password: f.opt.Key,

@@ -668,13 +656,13 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
    if err != nil {
        return fmt.Errorf("failed to authenticate: %w", err)
    }
    f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
    f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
    return nil
}

// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
    return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
    return slices.Contains(f.info.Allowed.Capabilities, permission)
}

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken

@@ -1079,83 +1067,44 @@ type listBucketFn func(*api.Bucket) error

// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
    responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]

    call := func(id string) error {
        var account = api.ListBucketsRequest{
            AccountID: f.info.AccountID,
            BucketID:  id,
        }
        if bucketName != "" && account.BucketID == "" {
            account.BucketName = f.opt.Enc.FromStandardName(bucketName)
        }

        var response api.ListBucketsResponse
        opts := rest.Opts{
            Method: "POST",
            Path:   "/b2_list_buckets",
        }
        err := f.pacer.Call(func() (bool, error) {
            resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
            return f.shouldRetry(ctx, resp, err)
        })
        if err != nil {
            return err
        }
        responses = append(responses, response)
        return nil
    var account = api.ListBucketsRequest{
        AccountID: f.info.AccountID,
        BucketID:  f.info.Allowed.BucketID,
    }
    if bucketName != "" && account.BucketID == "" {
        account.BucketName = f.opt.Enc.FromStandardName(bucketName)
    }

    for i := range f.info.APIs.Storage.Allowed.Buckets {
        b := &f.info.APIs.Storage.Allowed.Buckets[i]
        // Empty names indicate a bucket that no longer exists, this is non-fatal
        // for multi-bucket API keys.
        if b.Name == "" {
            continue
        }
        // When requesting a specific bucket skip over non-matching names
        if bucketName != "" && b.Name != bucketName {
            continue
        }

        err := call(b.ID)
        if err != nil {
            return err
        }
    var response api.ListBucketsResponse
    opts := rest.Opts{
        Method: "POST",
        Path:   "/b2_list_buckets",
    }

    if len(f.info.APIs.Storage.Allowed.Buckets) == 0 {
        err := call("")
        if err != nil {
            return err
        }
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err
    }

    f.bucketIDMutex.Lock()
    f.bucketTypeMutex.Lock()
    f._bucketID = make(map[string]string, 1)
    f._bucketType = make(map[string]string, 1)

    for ri := range responses {
        response := &responses[ri]
        for i := range response.Buckets {
            bucket := &response.Buckets[i]
            bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
            f.cache.MarkOK(bucket.Name)
            f._bucketID[bucket.Name] = bucket.ID
            f._bucketType[bucket.Name] = bucket.Type
        }
    for i := range response.Buckets {
        bucket := &response.Buckets[i]
        bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
        f.cache.MarkOK(bucket.Name)
        f._bucketID[bucket.Name] = bucket.ID
        f._bucketType[bucket.Name] = bucket.Type
    }
    f.bucketTypeMutex.Unlock()
    f.bucketIDMutex.Unlock()
    for ri := range responses {
        response := &responses[ri]
        for i := range response.Buckets {
            bucket := &response.Buckets[i]
            err := fn(bucket)
            if err != nil {
                return err
            }
    for i := range response.Buckets {
        bucket := &response.Buckets[i]
        err = fn(bucket)
        if err != nil {
            return err
        }
    }
    return nil

@@ -1657,7 +1606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
    bucket, bucketPath := f.split(remote)
    var RootURL string
    if f.opt.DownloadURL == "" {
        RootURL = f.info.APIs.Storage.DownloadURL
        RootURL = f.info.DownloadURL
    } else {
        RootURL = f.opt.DownloadURL
    }

@@ -2008,7 +1957,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
    // Use downloadUrl from backblaze if downloadUrl is not set
    // otherwise use the custom downloadUrl
    if o.fs.opt.DownloadURL == "" {
        opts.RootURL = o.fs.info.APIs.Storage.DownloadURL
        opts.RootURL = o.fs.info.DownloadURL
    } else {
        opts.RootURL = o.fs.opt.DownloadURL
    }

@@ -1,237 +0,0 @@
// Package api has type definitions for drime
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
    "encoding/json"
    "fmt"
    "time"
)

// Types of things in Item
const (
    ItemTypeFolder = "folder"
)

// User information
type User struct {
    Email            string      `json:"email"`
    ID               json.Number `json:"id"`
    Avatar           string      `json:"avatar"`
    ModelType        string      `json:"model_type"`
    OwnsEntry        bool        `json:"owns_entry"`
    EntryPermissions []any       `json:"entry_permissions"`
    DisplayName      string      `json:"display_name"`
}

// Permissions for a file
type Permissions struct {
    FilesUpdate   bool `json:"files.update"`
    FilesCreate   bool `json:"files.create"`
    FilesDownload bool `json:"files.download"`
    FilesDelete   bool `json:"files.delete"`
}

// Item describes a folder or a file as returned by /drive/file-entries
type Item struct {
    ID           json.Number `json:"id"`
    Name         string      `json:"name"`
    Description  any         `json:"description"`
    FileName     string      `json:"file_name"`
    Mime         string      `json:"mime"`
    Color        any         `json:"color"`
    Backup       bool        `json:"backup"`
    Tracked      int         `json:"tracked"`
    FileSize     int64       `json:"file_size"`
    UserID       json.Number `json:"user_id"`
    ParentID     json.Number `json:"parent_id"`
    CreatedAt    time.Time   `json:"created_at"`
    UpdatedAt    time.Time   `json:"updated_at"`
    DeletedAt    any         `json:"deleted_at"`
    IsDeleted    int         `json:"is_deleted"`
    Path         string      `json:"path"`
    DiskPrefix   any         `json:"disk_prefix"`
    Type         string      `json:"type"`
    Extension    any         `json:"extension"`
    FileHash     any         `json:"file_hash"`
    Public       bool        `json:"public"`
    Thumbnail    bool        `json:"thumbnail"`
    MuxStatus    any         `json:"mux_status"`
    ThumbnailURL any         `json:"thumbnail_url"`
    WorkspaceID  int         `json:"workspace_id"`
    IsEncrypted  int         `json:"is_encrypted"`
    Iv           any         `json:"iv"`
    VaultID      any         `json:"vault_id"`
    OwnerID      int         `json:"owner_id"`
    Hash         string      `json:"hash"`
    URL          string      `json:"url"`
    Users        []User      `json:"users"`
    Tags         []any       `json:"tags"`
    Permissions  Permissions `json:"permissions"`
}

// Listing response
type Listing struct {
    CurrentPage int    `json:"current_page"`
    Data        []Item `json:"data"`
    From        int    `json:"from"`
    LastPage    int    `json:"last_page"`
    NextPage    int    `json:"next_page"`
    PerPage     int    `json:"per_page"`
    PrevPage    int    `json:"prev_page"`
    To          int    `json:"to"`
    Total       int    `json:"total"`
}

// UploadResponse for a file
type UploadResponse struct {
    Status    string `json:"status"`
    FileEntry Item   `json:"fileEntry"`
}

// CreateFolderRequest for a folder
type CreateFolderRequest struct {
    Name     string      `json:"name"`
    ParentID json.Number `json:"parentId,omitempty"`
}

// CreateFolderResponse for a folder
type CreateFolderResponse struct {
    Status string `json:"status"`
    Folder Item   `json:"folder"`
}

// Error is returned from drime when things go wrong
type Error struct {
    Message string `json:"message"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
    out := fmt.Sprintf("Error %q", e.Message)
    return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// DeleteRequest is the input to DELETE /file-entries
type DeleteRequest struct {
    EntryIDs      []string `json:"entryIds"`
    DeleteForever bool     `json:"deleteForever"`
}

// DeleteResponse is the response from DELETE /file-entries
type DeleteResponse struct {
    Status  string            `json:"status"`
    Message string            `json:"message"`
    Errors  map[string]string `json:"errors"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /file-entries/{id}/
type UpdateItemRequest struct {
    Name        string `json:"name,omitempty"`
    Description string `json:"description,omitempty"`
}

// UpdateItemResponse is returned by PUT /file-entries/{id}/
type UpdateItemResponse struct {
    Status    string `json:"status"`
    FileEntry Item   `json:"fileEntry"`
}

// MoveRequest is the input to /file-entries/move
type MoveRequest struct {
    EntryIDs      []string `json:"entryIds"`
    DestinationID string   `json:"destinationId"`
}

// MoveResponse is returned by POST /file-entries/move
type MoveResponse struct {
    Status  string `json:"status"`
    Entries []Item `json:"entries"`
}

// CopyRequest is the input to /file-entries/duplicate
type CopyRequest struct {
    EntryIDs      []string `json:"entryIds"`
    DestinationID string   `json:"destinationId"`
}

// CopyResponse is returned by POST /file-entries/duplicate
type CopyResponse struct {
    Status  string `json:"status"`
    Entries []Item `json:"entries"`
}

// MultiPartCreateRequest is the input of POST /s3/multipart/create
type MultiPartCreateRequest struct {
    Filename     string      `json:"filename"`
    Mime         string      `json:"mime"`
    Size         int64       `json:"size"`
    Extension    string      `json:"extension"`
    ParentID     json.Number `json:"parent_id"`
    RelativePath string      `json:"relativePath"`
}

// MultiPartCreateResponse is returned by POST /s3/multipart/create
type MultiPartCreateResponse struct {
    UploadID string `json:"uploadId"`
    Key      string `json:"key"`
}

// CompletedPart is the type for completed parts when making a multipart upload.
type CompletedPart struct {
    ETag       string `json:"ETag"`
    PartNumber int32  `json:"PartNumber"`
}

// MultiPartGetURLsRequest is the input of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsRequest struct {
    UploadID    string `json:"uploadId"`
    Key         string `json:"key"`
    PartNumbers []int  `json:"partNumbers"`
}

// MultiPartGetURLsResponse is the result of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsResponse struct {
    URLs []struct {
        URL        string `json:"url"`
        PartNumber int32  `json:"partNumber"`
    } `json:"urls"`
}

// MultiPartCompleteRequest is the input to POST /s3/multipart/complete
type MultiPartCompleteRequest struct {
    UploadID string          `json:"uploadId"`
    Key      string          `json:"key"`
    Parts    []CompletedPart `json:"parts"`
}

// MultiPartCompleteResponse is the result of POST /s3/multipart/complete
type MultiPartCompleteResponse struct {
    Location string `json:"location"`
}

// MultiPartEntriesRequest is the input to POST /s3/entries
type MultiPartEntriesRequest struct {
    ClientMime      string      `json:"clientMime"`
    ClientName      string      `json:"clientName"`
    Filename        string      `json:"filename"`
    Size            int64       `json:"size"`
    ClientExtension string      `json:"clientExtension"`
    ParentID        json.Number `json:"parent_id"`
    RelativePath    string      `json:"relativePath"`
}

// MultiPartEntriesResponse is the result of POST /s3/entries
type MultiPartEntriesResponse struct {
    FileEntry Item `json:"fileEntry"`
}

// MultiPartAbort is the input of POST /s3/multipart/abort
type MultiPartAbort struct {
    UploadID string `json:"uploadId"`
    Key      string `json:"key"`
}

File diff suppressed because it is too large

@@ -1,33 +0,0 @@
// Drime filesystem interface
package drime

import (
    "testing"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDrime:",
        NilObject:  (*Object)(nil),
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize: minChunkSize,
        },
    })
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadCutoff(cs)
}

var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetUploadCutoffer   = (*Fs)(nil)
)

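For reference, the usual rclone pattern for running such an fstests-based suite against a real account — assuming a configured `TestDrime:` remote — is:

```console
cd backend/drime
go test -v -remote TestDrime:
```
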
@@ -204,12 +204,6 @@ Example:
    Help: `URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.

Supports the format http://user:pass@host:port, http://host:port, http://host.

Example:

    http://myUser:myPass@proxyhostname.example.com:8000
`,
    Advanced: true,
}, {

@@ -72,7 +72,7 @@ func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadPara

    response := &UploadResult{}

    formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName, "application/octet-stream")
    formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)

    if err != nil {
        return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)

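This hunk — and the matching ones in the pcloud, seafile, and other backends below — changes `rest.MultipartUpload` to take the file part's content type as an explicit final argument. A standalone sketch of what an explicit part content type involves, using only the standard library; `buildPart` is a hypothetical stand-in for the real helper, not rclone's implementation:

```go
package main

import (
    "bytes"
    "fmt"
    "mime/multipart"
    "net/textproto"
)

// buildPart shows the one thing the signature change is about: setting the
// file part's Content-Type explicitly instead of relying on a default.
func buildPart(w *multipart.Writer, fieldName, fileName, contentType, content string) error {
    h := make(textproto.MIMEHeader)
    h.Set("Content-Disposition", fmt.Sprintf(`form-data; name=%q; filename=%q`, fieldName, fileName))
    h.Set("Content-Type", contentType) // the newly explicit argument
    part, err := w.CreatePart(h)
    if err != nil {
        return err
    }
    _, err = part.Write([]byte(content))
    return err
}

func main() {
    var buf bytes.Buffer
    w := multipart.NewWriter(&buf)
    if err := buildPart(w, "file", "photo.jpg", "application/octet-stream", "example data"); err != nil {
        panic(err)
    }
    w.Close()
    fmt.Print(buf.String()) // the part headers now include the explicit Content-Type
}
```
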
@@ -6,7 +6,6 @@ import (
    "context"
    "crypto/md5"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "path"

@@ -25,8 +24,7 @@ import (
var (
    hashType = hash.MD5
    // the object storage is persistent
    buckets = newBucketsInfo()
    errWriteOnly = errors.New("can't read when using --memory-discard")
    buckets      = newBucketsInfo()
)

// Register with Fs

@@ -35,32 +33,12 @@ func init() {
    Name:        "memory",
    Description: "In memory object storage system.",
    NewFs:       NewFs,
    Options: []fs.Option{{
        Name:     "discard",
        Default:  false,
        Advanced: true,
        Help: `If set all writes will be discarded and reads will return an error

If set then when files are uploaded the contents will not be saved. The
files will appear to have been uploaded but will give an error on
read. Files will have their MD5 sum calculated on upload which takes
very little CPU time and allows the transfers to be checked.

This can be useful for testing performance.

Probably most easily used by using the connection string syntax:

    :memory,discard:bucket

`,
    }},
    Options: []fs.Option{},
})
}

// Options defines the configuration for this backend
type Options struct {
    Discard bool `config:"discard"`
}
type Options struct{}

// Fs represents a remote memory server
type Fs struct {

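For context, the `discard` option removed here was driven through the connection-string syntax quoted in the help text above; a typical invocation (source path is a placeholder) would have looked like:

```console
rclone copy -P /path/to/testfiles :memory,discard:bucket
```
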
@@ -186,7 +164,6 @@ type objectData struct {
    hash     string
    mimeType string
    data     []byte
    size     int64
}

// Object describes a memory object

@@ -581,7 +558,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hashType {
        return "", hash.ErrUnsupported
    }
    if o.od.hash == "" && !o.fs.opt.Discard {
    if o.od.hash == "" {
        sum := md5.Sum(o.od.data)
        o.od.hash = hex.EncodeToString(sum[:])
    }

@@ -590,7 +567,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    return o.od.size
    return int64(len(o.od.data))
}

// ModTime returns the modification time of the object

@@ -616,9 +593,6 @@ func (o *Object) Storable() bool {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    if o.fs.opt.Discard {
        return nil, errWriteOnly
    }
    var offset, limit int64 = 0, -1
    for _, option := range options {
        switch x := option.(type) {

@@ -650,24 +624,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    bucket, bucketPath := o.split()
    var data []byte
    var size int64
    var hash string
    if o.fs.opt.Discard {
        h := md5.New()
        size, err = io.Copy(h, in)
        hash = hex.EncodeToString(h.Sum(nil))
    } else {
        data, err = io.ReadAll(in)
        size = int64(len(data))
    }
    data, err := io.ReadAll(in)
    if err != nil {
        return fmt.Errorf("failed to update memory object: %w", err)
    }
    o.od = &objectData{
        data:     data,
        size:     size,
        hash:     hash,
        hash:     "",
        modTime:  src.ModTime(ctx),
        mimeType: fs.MimeType(ctx, src),
    }

@@ -222,11 +222,3 @@ type UserInfo struct {
        } `json:"steps"`
    } `json:"journey"`
}

// DiffResult is the response from /diff
type DiffResult struct {
    Result  int              `json:"result"`
    DiffID  int64            `json:"diffid"`
    Entries []map[string]any `json:"entries"`
    Error   string           `json:"error"`
}

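The `DiffResult` type removed above is what the long-poll loop below decodes. A self-contained sketch of a payload it could parse — the JSON is invented from the field tags and from the metadata keys that `changeNotifyLoop` reads, not copied from pCloud's documentation:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed copy of DiffResult from the hunk above.
type DiffResult struct {
    Result  int              `json:"result"`
    DiffID  int64            `json:"diffid"`
    Entries []map[string]any `json:"entries"`
    Error   string           `json:"error"`
}

func main() {
    // Hypothetical /diff payload; names and IDs are placeholders.
    payload := []byte(`{
        "result": 0,
        "diffid": 4242,
        "entries": [{"metadata": {
            "name": "report.txt",
            "parentfolderid": 123,
            "isfolder": false
        }}]
    }`)
    var result DiffResult
    if err := json.Unmarshal(payload, &result); err != nil {
        panic(err)
    }
    meta := result.Entries[0]["metadata"].(map[string]any)
    fmt.Println(result.DiffID, meta["name"], meta["isfolder"]) // 4242 report.txt false
}
```
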
@@ -171,7 +171,6 @@ type Fs struct {
    dirCache     *dircache.DirCache // Map of directory path to directory id
    pacer        *fs.Pacer          // pacer for API calls
    tokenRenewer *oauthutil.Renew   // renew the token on expiry
    lastDiffID   int64              // change tracking state for diff long-polling
}

// Object describes a pcloud object

@@ -1034,137 +1033,6 @@ func (f *Fs) Shutdown(ctx context.Context) error {
    return nil
}

// ChangeNotify implements fs.Features.ChangeNotify
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
    // Start long-poll loop in background
    go f.changeNotifyLoop(ctx, notify, ch)
}

// changeNotifyLoop contains the blocking long-poll logic.
func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
    // Standard polling interval
    interval := 30 * time.Second

    // Start with diffID = 0 to get the current state
    var diffID int64

    // Helper to process changes from the diff API
    handleChanges := func(entries []map[string]any) {
        notifiedPaths := make(map[string]bool)

        for _, entry := range entries {
            meta, ok := entry["metadata"].(map[string]any)
            if !ok {
                continue
            }

            // Robust extraction of ParentFolderID
            var pid int64
            if val, ok := meta["parentfolderid"]; ok {
                switch v := val.(type) {
                case float64:
                    pid = int64(v)
                case int64:
                    pid = v
                case int:
                    pid = int64(v)
                }
            }

            // Resolve the path using dirCache.GetInv
            // pCloud uses "d" prefix for directory IDs in cache, but API returns numbers
            dirID := fmt.Sprintf("d%d", pid)
            parentPath, ok := f.dirCache.GetInv(dirID)

            if !ok {
                // Parent not in cache, so we can ignore this change as it is outside
                // of what the mount has seen or cares about.
                continue
            }

            name, _ := meta["name"].(string)
            fullPath := path.Join(parentPath, name)

            // Determine EntryType (File or Directory)
            entryType := fs.EntryObject
            if isFolder, ok := meta["isfolder"].(bool); ok && isFolder {
                entryType = fs.EntryDirectory
            }

            // Deduplicate notifications for this batch
            if !notifiedPaths[fullPath] {
                fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType)
                notify(fullPath, entryType)
                notifiedPaths[fullPath] = true
            }
        }
    }

    for {
        // Check context and channel
        select {
        case <-ctx.Done():
            return
        case newInterval, ok := <-ch:
            if !ok {
                return
            }
            interval = newInterval
        default:
        }

        // Setup /diff Request
        opts := rest.Opts{
            Method:     "GET",
            Path:       "/diff",
            Parameters: url.Values{},
        }

        if diffID != 0 {
            opts.Parameters.Set("diffid", strconv.FormatInt(diffID, 10))
            opts.Parameters.Set("block", "1")
        } else {
            opts.Parameters.Set("last", "0")
        }

        // Perform Long-Poll
        // Timeout set to 90s (server usually blocks for 60s max)
        reqCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
        var result api.DiffResult

        _, err := f.srv.CallJSON(reqCtx, &opts, nil, &result)
        cancel()

        if err != nil {
            if errors.Is(err, context.Canceled) {
                return
            }
            // Ignore timeout errors as they are normal for long-polling
            if !errors.Is(err, context.DeadlineExceeded) {
                fs.Infof(f, "ChangeNotify: polling error: %v. Waiting %v.", err, interval)
                time.Sleep(interval)
            }
            continue
        }

        // If result is not 0, reset DiffID to resync
        if result.Result != 0 {
            diffID = 0
            time.Sleep(2 * time.Second)
            continue
        }

        if result.DiffID != 0 {
            diffID = result.DiffID
            f.lastDiffID = diffID
        }

        if len(result.Entries) > 0 {
            handleChanges(result.Entries)
        }
    }
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
    // EU region supports SHA1 and SHA256 (but rclone doesn't

@@ -1459,7 +1327,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    // opts.Body=0), so upload it as a multipart form POST with
    // Content-Length set.
    if size == 0 {
        formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf, opts.ContentType)
        formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
        if err != nil {
            return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
        }

@@ -1533,7 +1401,6 @@ var (
    _ fs.ListRer        = (*Fs)(nil)
    _ fs.Abouter        = (*Fs)(nil)
    _ fs.Shutdowner     = (*Fs)(nil)
    _ fs.ChangeNotifier = (*Fs)(nil)
    _ fs.Object         = (*Object)(nil)
    _ fs.IDer           = (*Object)(nil)
)

@@ -1384,7 +1384,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
    for i := range iVal.NumField() {
        params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
    }
    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name, "application/octet-stream")
    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
    if err != nil {
        return fmt.Errorf("failed to make multipart upload: %w", err)
    }

@@ -1,15 +0,0 @@
name: BizflyCloud
description: Bizfly Cloud Simple Storage
region:
  hn: Ha Noi
  hcm: Ho Chi Minh
endpoint:
  hn.ss.bfcplatform.vn: Hanoi endpoint
  hcm.ss.bfcplatform.vn: Ho Chi Minh endpoint
acl: {}
bucket_acl: true
quirks:
  force_path_style: true
  list_url_encode: false
  use_multipart_etag: false
  use_already_exists: false

@@ -2928,9 +2928,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
    req := s3.CopyObjectInput{
        MetadataDirective: types.MetadataDirectiveCopy,
    }
    if srcObj.storageClass != nil {
        req.StorageClass = types.StorageClass(*srcObj.storageClass)
    }

    // Build upload options including headers and metadata
    ci := fs.GetConfig(ctx)
    uploadOptions := fs.MetadataAsOpenOptions(ctx)

@@ -4503,12 +4501,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
        ACL: types.ObjectCannedACL(o.fs.opt.ACL),
        Key: &bucketPath,
    }
    if tierObj, ok := src.(fs.GetTierer); ok {
        tier := tierObj.GetTier()
        if tier != "" {
            ui.req.StorageClass = types.StorageClass(strings.ToUpper(tier))
        }
    }

    // Fetch metadata if --metadata is in use
    meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
    if err != nil {

@@ -688,7 +688,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
        "need_idx_progress": {"true"},
        "replace":           {"1"},
    }
    formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename), "application/octet-stream")
    formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
    if err != nil {
        return nil, fmt.Errorf("failed to make multipart upload: %w", err)
    }

@@ -519,12 +519,6 @@ Example:
    Help: `URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.

Supports the format http://user:pass@host:port, http://host:port, http://host.

Example:

    http://myUser:myPass@proxyhostname.example.com:8000
`,
    Advanced: true,
}, {

@@ -925,8 +919,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        opt.Port = "22"
    }

    // Set up sshConfig here from opt
    // **NB** everything else should be setup in NewFsWithConnection
    // get proxy URL if set
    if opt.HTTPProxy != "" {
        proxyURL, err := url.Parse(opt.HTTPProxy)
        if err != nil {
            return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
        }
        f.proxyURL = proxyURL
    }

    sshConfig := &ssh.ClientConfig{
        User: opt.User,
        Auth: []ssh.AuthMethod{},

@@ -1174,21 +1175,11 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
    f.mkdirLock = newStringLock()
    f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
    f.savedpswd = ""

    // set the pool drainer timer going
    if f.opt.IdleTimeout > 0 {
        f.drain = time.AfterFunc(time.Duration(f.opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
    }

    // get proxy URL if set
    if opt.HTTPProxy != "" {
        proxyURL, err := url.Parse(opt.HTTPProxy)
        if err != nil {
            return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
        }
        f.proxyURL = proxyURL
    }

    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
        SlowHash:                true,

@@ -311,8 +311,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    o := &Object{
        fs:     f,
        remote: remote,
        mtime:  srcObj.mtime,
        size:   srcObj.size,
    }
    fromFullPath := path.Join(src.Fs().Root(), srcObj.remote)
    toFullPath := path.Join(f.root, remote)

@@ -369,18 +367,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
        return fs.ErrorDirExists
    }

    fullPathSrc := f.buildFullPath(srcRemote)
    fullPathSrcUnencoded, err := url.QueryUnescape(fullPathSrc)
    if err != nil {
        return err
    }

    fullPathDstUnencoded, err := url.QueryUnescape(fullPath)
    if err != nil {
        return err
    }

    err = f.ensureParentDirectories(ctx, dstRemote)
    err := f.ensureParentDirectories(ctx, dstRemote)
    if err != nil {
        return err
    }

@@ -391,15 +378,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
    }

    _, err = f.Move(ctx, o, dstRemote)

    if err == nil {
        f.createdDirMu.Lock()
        f.createdDirs[fullPathSrcUnencoded] = false
        f.createdDirs[fullPathDstUnencoded] = true
        f.createdDirMu.Unlock()
    }

    return err
}

@@ -817,7 +817,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
    params.Set("filename", url.QueryEscape(name))
    params.Set("parent_id", parent)
    params.Set("override-name-exist", strconv.FormatBool(true))
    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name, "application/octet-stream")
    formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
    if err != nil {
        return nil, fmt.Errorf("failed to make multipart upload: %w", err)
    }

@@ -43,7 +43,6 @@ docs = [
    "compress.md",
    "combine.md",
    "doi.md",
    "drime.md",
    "dropbox.md",
    "filefabric.md",
    "filelu.md",

@@ -26,10 +26,6 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re
The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
use |-R| to make them recurse.

List commands prefer a recursive method that uses more memory but fewer
transactions by default. Use |--disable ListR| to suppress the behavior.
See [|--fast-list|](/docs/#fast-list) for more details.

Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes).`, "|", "`")

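To make those defaults concrete, here are the flag combinations named above, with a placeholder remote name:

```console
rclone ls --max-depth 1 remote:        # ls recurses by default; this stops it
rclone lsf -R remote:                  # lsf does not recurse; -R makes it
rclone lsjson --disable ListR remote:  # suppress the recursive ListR method
```
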
@@ -13,26 +13,6 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
`--auth-key` is not provided then `serve s3` will allow anonymous
access.

Like all rclone flags `--auth-key` can be set via environment
variables, in this case `RCLONE_AUTH_KEY`. Since this flag can be
repeated, the input to `RCLONE_AUTH_KEY` is CSV encoded. Because
`accessKey,secretKey` has a comma in it, it needs to be in quotes.

```console
export RCLONE_AUTH_KEY='"user,pass"'
rclone serve s3 ...
```

Or to supply multiple identities:

```console
export RCLONE_AUTH_KEY='"user1,pass1","user2,pass2"'
rclone serve s3 ...
```

Setting this variable without quotes will produce an error.

Please note that some clients may require HTTPS endpoints. See [the
SSL docs](#tls-ssl) for more information.

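The same identity can also be given directly with the flag rather than the environment variable — a sketch with placeholder keys:

```console
rclone serve s3 --auth-key accessKey,secretKey remote:path
```
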
@@ -70,11 +70,6 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
        w.s3Secret = getAuthSecret(opt.AuthKey)
    }

    authList, err := authlistResolver(opt.AuthKey)
    if err != nil {
        return nil, fmt.Errorf("parsing auth list failed: %q", err)
    }

    var newLogger logger
    w.faker = gofakes3.New(
        newBackend(w),

@@ -82,7 +77,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
        gofakes3.WithLogger(newLogger),
        gofakes3.WithRequestID(rand.Uint64()),
        gofakes3.WithoutVersioning(),
        gofakes3.WithV4Auth(authList),
        gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
        gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
    )

@@ -97,7 +92,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
        w._vfs = vfs.New(f, vfsOpt)

        if len(opt.AuthKey) > 0 {
            w.faker.AddAuthKeys(authList)
            w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
        }
    }

@@ -3,7 +3,6 @@ package s3
import (
    "context"
    "encoding/hex"
    "errors"
    "io"
    "os"
    "path"

@@ -126,14 +125,15 @@ func rmdirRecursive(p string, VFS *vfs.VFS) {
    }
}

func authlistResolver(list []string) (map[string]string, error) {
func authlistResolver(list []string) map[string]string {
    authList := make(map[string]string)
    for _, v := range list {
        parts := strings.Split(v, ",")
        if len(parts) != 2 {
            return nil, errors.New("invalid auth pair: expecting a single comma")
            fs.Infof(nil, "Ignored: invalid auth pair %s", v)
            continue
        }
        authList[parts[0]] = parts[1]
    }
    return authList, nil
    return authList
}

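A standalone copy of the stricter parsing introduced above — each entry must be exactly `accessKey,secretKey`, otherwise the whole list is now rejected instead of the bad pair being skipped with a log message:

```go
package main

import (
    "errors"
    "fmt"
    "strings"
)

// authlistResolver, extracted from the hunk above for illustration.
func authlistResolver(list []string) (map[string]string, error) {
    authList := make(map[string]string)
    for _, v := range list {
        parts := strings.Split(v, ",")
        if len(parts) != 2 {
            return nil, errors.New("invalid auth pair: expecting a single comma")
        }
        authList[parts[0]] = parts[1]
    }
    return authList, nil
}

func main() {
    ok, err := authlistResolver([]string{"user1,pass1", "user2,pass2"})
    fmt.Println(ok, err) // map[user1:pass1 user2:pass2] <nil>

    _, err = authlistResolver([]string{"missing-comma"})
    fmt.Println(err) // invalid auth pair: expecting a single comma
}
```
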
@@ -116,7 +116,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}}
{{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
{{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}}
{{< provider name="Backblaze B2" home="https://www.backblaze.com/cloud-storage" config="/b2/" >}}
{{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
{{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}

@@ -129,7 +128,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Drime" home="https://www.drime.cloud/" config="/drime/" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
{{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}

@@ -1058,8 +1058,3 @@ put them back in again. -->
- Tingsong Xu <tingsong.xu@rightcapital.com>
- Jonas Tingeborn <134889+jojje@users.noreply.github.com>
- jhasse-shade <jacob@shade.inc>
- vyv03354 <VYV03354@nifty.ne.jp>
- masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>
- vupn0712 <126212736+vupn0712@users.noreply.github.com>
- darkdragon-001 <darkdragon-001@users.noreply.github.com>
- sys6101 <csvmen@gmail.com>

@@ -283,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scena
All copy commands send the following 4 requests:

```text
/b2api/v4/b2_authorize_account
/b2api/v1/b2_authorize_account
/b2api/v1/b2_create_bucket
/b2api/v1/b2_list_buckets
/b2api/v1/b2_list_file_names

@@ -336,7 +336,7 @@ full new copy of the file.
When mounting with `--read-only`, attempts to write to files will fail *silently*
as opposed to with a clear warning as in macFUSE.

## Mounting on Linux
# Mounting on Linux

On newer versions of Ubuntu, you may encounter the following error when running
`rclone mount`:

@@ -43,7 +43,6 @@ See the following for detailed instructions for
- [Crypt](/crypt/) - to encrypt other remotes
- [DigitalOcean Spaces](/s3/#digitalocean-spaces)
- [Digi Storage](/koofr/#digi-storage)
- [Drime](/drime/)
- [Dropbox](/dropbox/)
- [Enterprise File Fabric](/filefabric/)
- [FileLu Cloud Storage](/filelu/)

@@ -1,244 +0,0 @@
---
title: "Drime"
description: "Rclone docs for Drime"
versionIntroduced: "v1.73"
---

# {{< icon "fa fa-cloud" >}} Drime

[Drime](https://drime.cloud/) is a cloud storage and transfer service focused
on fast, resilient file delivery. It offers both free and paid tiers with
emphasis on high-speed uploads and link sharing.

To set up Drime you need to log in, navigate to Settings, then Developer, and
create a token to use as an API access key. Give it a sensible name and copy
the token for use in the config.

## Configuration

Here is a run through of `rclone config` to make a remote called `remote`.

Firstly run:

```console
rclone config
```

Then follow through the interactive setup:

```text
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> remote

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
XX / Drime
   \ (drime)
Storage> drime

Option access_token.
API Access token
You can get this from the web control panel.
Enter a value. Press Enter to leave empty.
access_token> YOUR_API_ACCESS_TOKEN

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: drime
- access_token: YOUR_API_ACCESS_TOKEN
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Once configured you can then use `rclone` like this (replace `remote` with the
name you gave your remote):

List directories and files in the top level of your Drime

```console
rclone lsf remote:
```

To copy a local directory to a Drime directory called backup

```console
rclone copy /home/source remote:backup
```

### Modification times and hashes

Drime does not support modification times or hashes.

This means that by default syncs will only use the size of the file to determine
if it needs updating.

You can use the `--update` flag, which will use the time the object was uploaded.
For many operations this is sufficient to determine if it has changed. However,
files created with timestamps in the past will be missed by the sync if using
`--update`.

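For example, a sync that falls back to upload time for change detection (paths are placeholders) could be run as:

```console
rclone sync --update /home/source remote:backup
```

Bear in mind the caveat above: files whose timestamps predate their upload will not be picked up this way.
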
### Restricted filename characters

In addition to the [default restricted characters set](/overview/#restricted-characters)
the following characters are also replaced:

| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| \         | 0x5C  | ＼          |

File names can also not start or end with the following characters. These only
get replaced if they are the first or last character in the name:

| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| SP        | 0x20  | ␠           |

Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.

### Root folder ID

You can set the `root_folder_id` for rclone. This is the directory
(identified by its `Folder ID`) that rclone considers to be the root
of your Drime drive.

Normally you will leave this blank and rclone will determine the
correct root to use itself and fill in the value in the config file.

However you can set this to restrict rclone to a specific folder
hierarchy.

In order to do this you will have to find the `Folder ID` of the
directory you wish rclone to display.

You can do this with rclone

```console
$ rclone lsf -Fip --dirs-only remote:
d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/
f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/
d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/
```

The ID to use is the part before the `;` so you could set

```text
root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0
```

To restrict rclone to the `Files` directory.

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/drime/drime.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

Here are the Standard options specific to drime (Drime).

#### --drime-access-token

API Access token

You can get this from the web control panel.

Properties:

- Config: access_token
- Env Var: RCLONE_DRIME_ACCESS_TOKEN
- Type: string
- Required: false

### Advanced options

Here are the Advanced options specific to drime (Drime).

#### --drime-root-folder-id

ID of the root folder

Leave this blank normally, rclone will fill it in automatically.

If you want rclone to be restricted to a particular folder you can
fill it in - see the docs for more info.

Properties:

- Config: root_folder_id
- Env Var: RCLONE_DRIME_ROOT_FOLDER_ID
- Type: string
- Required: false

#### --drime-workspace-id

Account ID

Leave this blank normally, rclone will fill it in automatically.

Properties:

- Config: workspace_id
- Env Var: RCLONE_DRIME_WORKSPACE_ID
- Type: string
- Required: false

#### --drime-list-chunk

Number of items to list in each call

Properties:

- Config: list_chunk
- Env Var: RCLONE_DRIME_LIST_CHUNK
- Type: int
- Default: 1000

#### --drime-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_DRIME_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot

#### --drime-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_DRIME_DESCRIPTION
- Type: string
- Required: false

<!-- autogenerated options stop -->

## Limitations

Drime only supports filenames up to 255 bytes in length, where filenames are
encoded in UTF8.

@@ -498,12 +498,6 @@ URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.

Supports the format http://user:pass@host:port, http://host:port, http://host.

Example:

    http://myUser:myPass@proxyhostname.example.com:8000

Properties:

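Assuming the flag name follows the option's config key (`http_proxy`), using the proxy from the command line would look something like this (`mysftp:` is a placeholder remote):

```console
rclone lsd --sftp-http-proxy http://myUser:myPass@proxyhostname.example.com:8000 mysftp:
```
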
@@ -23,7 +23,6 @@ Here is an overview of the major features of each cloud storage system.
| Box | SHA1 | R/W | Yes | No | - | - |
| Citrix ShareFile | MD5 | R/W | Yes | No | - | - |
| Cloudinary | MD5 | R | No | Yes | - | - |
| Drime | - | - | No | No | R/W | - |
| Dropbox | DBHASH ¹ | R | Yes | No | - | - |
| Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
| FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - |

@@ -516,7 +515,6 @@ upon backend-specific capabilities.
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| Drime | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | No | Yes |
| Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No |
| Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |

@@ -173,31 +173,6 @@ So if the folder you want rclone to use is "My Music/", then use the returned
id from the `rclone lsf` command (ex. `dxxxxxxxx2`) as the `root_folder_id` variable
value in the config file.

### Change notifications and mounts

The pCloud backend supports real‑time updates for rclone mounts via change
notifications. rclone uses pCloud’s diff long‑polling API to detect changes and
will automatically refresh directory listings in the mounted filesystem when
changes occur.

Notes and behavior:

- Works automatically when using `rclone mount` and requires no additional
configuration.
- Notifications are directory‑scoped: when rclone detects a change, it refreshes
the affected directory so new/removed/renamed files become visible promptly.
- Updates are near real‑time. The backend uses a long‑poll with short fallback
polling intervals, so you should see changes appear quickly without manual
refreshes.

If you want to debug or verify notifications, you can use the helper command:

```bash
rclone test changenotify remote:
```

This will log incoming change notifications for the given remote.

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/pcloud/pcloud.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

@@ -18,7 +18,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}}
{{< provider name="Cloudflare R2" home="https://blog.cloudflare.com/r2-open-beta/" config="/s3/#cloudflare-r2" >}}
{{< provider name="Arvan Cloud Object Storage (AOS)" home="https://www.arvancloud.com/en/products/cloud-storage" config="/s3/#arvan-cloud" >}}
{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}}
{{< provider name="Cubbit DS3" home="https://cubbit.io/ds3-cloud" config="/s3/#Cubbit" >}}
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}

@@ -4537,36 +4536,6 @@ server_side_encryption =
storage_class =
```

### BizflyCloud {#bizflycloud}

[Bizfly Cloud Simple Storage](https://bizflycloud.vn/simple-storage) is an
S3-compatible service with regions in Hanoi (HN) and Ho Chi Minh City (HCM).

Use the endpoint for your region:

- HN: `hn.ss.bfcplatform.vn`
- HCM: `hcm.ss.bfcplatform.vn`

A minimal configuration looks like this:

```ini
[bizfly]
type = s3
provider = BizflyCloud
env_auth = false
access_key_id = YOUR_ACCESS_KEY
secret_access_key = YOUR_SECRET_KEY
region = HN
endpoint = hn.ss.bfcplatform.vn
location_constraint =
acl =
server_side_encryption =
storage_class =
```

Switch `region` and `endpoint` to `HCM` and `hcm.ss.bfcplatform.vn` for Ho Chi
Minh City.
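
Once configured, the remote works with ordinary rclone commands; the bucket
name `mybucket` below is only an example:

```bash
# List buckets on the remote
rclone lsd bizfly:

# Copy a local directory into a bucket (assumes "mybucket" exists)
rclone copy /path/to/local bizfly:mybucket

# Verify the transfer
rclone check /path/to/local bizfly:mybucket
```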

### Ceph

[Ceph](https://ceph.com/) is an open-source, unified, distributed

@@ -1186,12 +1186,6 @@ URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.

Supports the format http://user:pass@host:port, http://host:port, http://host.

Example:

    http://myUser:myPass@proxyhostname.example.com:8000

Properties:

@@ -13,7 +13,7 @@ Thank you to our sponsors:
<!-- markdownlint-capture -->
<!-- markdownlint-disable line-length no-bare-urls -->

{{< sponsor src="/img/logos/rabata.svg" width="300" height="200" title="Visit our sponsor Rabata.io" link="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general">}}
{{< sponsor src="/img/logos/rabata/txt_1_300x114.png" width="300" height="200" title="Visit our sponsor Rabata.io" link="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general">}}
{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}}
{{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}}

@@ -10,21 +10,40 @@
{{end}}

<div class="card">
  <div class="card-header">Platinum Sponsor</div>
  <div class="card-header">
    Platinum Sponsor
  </div>
  <div class="card-body">
    <a href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img src="/img/logos/rabata.svg"></a><br />
    <a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_website.png"></a><br />
    <script>
      const imgs = [
        { href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_website.png" },
        { href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_website.png" },
        { href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_website.png" },
      ];
      const img = imgs[Math.floor(Math.random() * imgs.length)];
      document.addEventListener("DOMContentLoaded", () => {
        const a = document.getElementById("platinum");
        a.href = img.href;
        a.querySelector("img").src = img.img;
      });
    </script>
  </div>
</div>

<div class="card">
  <div class="card-header">Gold Sponsor</div>
  <div class="card-header">
    Gold Sponsor
  </div>
  <div class="card-body">
    <a href="https://www.idrive.com/e2/?refer=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor IDrive e2"><img src="/img/logos/idrive_e2.svg" viewBox="0 0 60 55"></a><br />
  </div>
</div>

<div class="card">
  <div class="card-header">Gold Sponsor</div>
  <div class="card-header">
    Gold Sponsor
  </div>
  <div class="card-body">
    <a href="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone" target="_blank" rel="noopener" title="Start Your Free Trial Today"><img style="max-width: 100%; height: auto;" src="/img/logos/filescom-enterprise-grade-workflows.png"></a><br />
  </div>
@@ -32,19 +51,25 @@

{{if .IsHome}}
<div class="card">
  <div class="card-header">Silver Sponsor</div>
  <div class="card-header">
    Silver Sponsor
  </div>
  <div class="card-body">
    <a href="https://rcloneview.com/?utm_source=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor RcloneView"><img src="/img/logos/rcloneview.svg"></a><br />
    <a href="https://rcloneview.com/?utm_source=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor RcloneView"><img src="/img/logos/rcloneview-banner.svg"></a><br />
  </div>
</div>
<div class="card">
  <div class="card-header">Silver Sponsor</div>
  <div class="card-header">
    Silver Sponsor
  </div>
  <div class="card-body">
    <a href="https://github.com/rclone-ui/rclone-ui" target="_blank" rel="noopener" title="Visit rclone's sponsor rclone UI"><img src="/img/logos/rcloneui.svg"></a><br />
  </div>
</div>
<div class="card">
  <div class="card-header">Silver Sponsor</div>
  <div class="card-header">
    Silver Sponsor
  </div>
  <div class="card-body">
    <a href="https://shade.inc/" target="_blank" rel="noopener" title="Visit rclone's sponsor Shade"><img style="max-width: 100%; height: auto;" src="/img/logos/shade.svg"></a><br />
  </div>

@@ -66,7 +66,6 @@
<a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square fa-fw"></i> Citrix ShareFile</a>
<a class="dropdown-item" href="/crypt/"><i class="fa fa-lock fa-fw"></i> Crypt (encrypts the others)</a>
<a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a>
<a class="dropdown-item" href="/drime/"><i class="fab fa-cloud fa-fw"></i> Drime</a>
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a>

@@ -561,7 +561,7 @@ func TestUploadFile(t *testing.T) {
		assert.NoError(t, currentFile.Close())
	}()

	formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName, "application/octet-stream")
	formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName)
	require.NoError(t, err)

	httpReq := httptest.NewRequest("POST", "/", formReader)
@@ -587,7 +587,7 @@ func TestUploadFile(t *testing.T) {
		assert.NoError(t, currentFile2.Close())
	}()

	formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName, "application/octet-stream")
	formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName)
	require.NoError(t, err)

	httpReq = httptest.NewRequest("POST", "/", formReader)

@@ -1273,14 +1273,10 @@ func Run(t *testing.T, opt *Opt) {
	assert.Equal(t, file2Copy.Path, dst.Remote())

	// check that mutating dst does not mutate src
	if !strings.Contains(fs.ConfigStringFull(f), "copy_is_hardlink") {
		err = dst.SetModTime(ctx, fstest.Time("2004-03-03T04:05:06.499999999Z"))
		if err != fs.ErrorCantSetModTimeWithoutDelete && err != fs.ErrorCantSetModTime {
			assert.NoError(t, err)
			// Re-read the source and check its modtime
			src = fstest.NewObject(ctx, t, f, src.Remote())
			assert.False(t, src.ModTime(ctx).Equal(dst.ModTime(ctx)), "mutating dst should not mutate src -- is it Copying by pointer?")
		}
	err = dst.SetModTime(ctx, fstest.Time("2004-03-03T04:05:06.499999999Z"))
	if err != fs.ErrorCantSetModTimeWithoutDelete && err != fs.ErrorCantSetModTime {
		assert.NoError(t, err)
		assert.False(t, src.ModTime(ctx).Equal(dst.ModTime(ctx)), "mutating dst should not mutate src -- is it Copying by pointer?")
	}

	// Delete copy

@@ -677,9 +677,3 @@ backends:
    # with the parent backend having a different precision.
      - TestServerSideCopyOverSelf
      - TestServerSideMoveOverSelf
  - backend: "drime"
    remote: "TestDrime:"
    ignoretests:
      # The TestBisyncRemoteLocal/check_access_filters tests fail due to duplicated objects
      - cmd/bisync
    fastlist: false

@@ -361,6 +361,9 @@ func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, e
	} else if dc.rootID == dc.trueRootID {
		return "", errors.New("is root directory")
	}
	if dc.rootParentID == "" {
		return "", errors.New("internal error: didn't find rootParentID")
	}
	return dc.rootParentID, nil
}

@@ -3,7 +3,6 @@ package proxy

import (
	"bufio"
	"crypto/tls"
	"encoding/base64"
	"fmt"
	"net"
	"net/http"
@@ -56,13 +55,7 @@ func HTTPConnectDial(network, addr string, proxyURL *url.URL, proxyDialer proxy.
	}

	// send CONNECT
	user := proxyURL.User
	if user != nil {
		credential := base64.StdEncoding.EncodeToString([]byte(user.String()))
		_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\nProxy-Authorization: Basic %s\r\n\r\n", addr, addr, credential)
	} else {
		_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
	}
	_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
	if err != nil {
		_ = conn.Close()
		return nil, fmt.Errorf("HTTP CONNECT proxy failed to send CONNECT: %q", err)
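
With the change above, credentials in the proxy URL are no longer turned into a
`Proxy-Authorization` header by this function. For context, here is a minimal
usage sketch; the import path, target host, and use of `proxy.Direct` from
`golang.org/x/net/proxy` are assumptions for illustration, not part of this
patch:

```go
package main

import (
	"log"
	"net/url"

	"golang.org/x/net/proxy"

	// Assumed import path for the package shown in this patch.
	rcproxy "github.com/rclone/rclone/lib/proxy"
)

func main() {
	// An HTTP CONNECT proxy in the documented http://host:port form.
	proxyURL, err := url.Parse("http://proxyhostname.example.com:8000")
	if err != nil {
		log.Fatal(err)
	}
	// proxy.Direct makes the initial TCP connection to the proxy itself;
	// HTTPConnectDial then tunnels through it to the target address.
	conn, err := rcproxy.HTTPConnectDial("tcp", "sftp.example.com:22", proxyURL, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = conn.Close() }()
	log.Printf("tunnelled to %s via proxy", conn.RemoteAddr())
}
```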
@@ -14,9 +14,7 @@ import (
	"maps"
	"mime/multipart"
	"net/http"
	"net/textproto"
	"net/url"
	"strings"
	"sync"

	"github.com/rclone/rclone/fs"
@@ -147,7 +145,6 @@ type Opts struct {
	MultipartMetadataName string       // ..this is used for the name of the metadata form part if set
	MultipartContentName  string       // ..name of the parameter which is the attached file
	MultipartFileName     string       // ..name of the file for the attached file
	MultipartContentType  string       // ..content type of the attached file
	Parameters            url.Values   // any parameters for the final URL
	TransferEncoding      []string     // transfer encoding, set to "identity" to disable chunked encoding
	Trailer               *http.Header // set the request trailer
@@ -374,32 +371,6 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
	return resp, nil
}

var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

func escapeQuotes(s string) string {
	return quoteEscaper.Replace(s)
}

// multipartFileContentDisposition returns the value of a Content-Disposition header
// with the provided field name and file name.
func multipartFileContentDisposition(fieldname, filename string) string {
	return fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
		escapeQuotes(fieldname), escapeQuotes(filename))
}

// CreateFormFile is a convenience wrapper around [Writer.CreatePart]. It creates
// a new form-data header with the provided field name and file name.
func CreateFormFile(w *multipart.Writer, fieldname, filename, contentType string) (io.Writer, error) {
	h := make(textproto.MIMEHeader)
	// FIXME when go1.24 is no longer supported, change to
	// multipart.FileContentDisposition and remove definition above
	h.Set("Content-Disposition", multipartFileContentDisposition(fieldname, filename))
	if contentType != "" {
		h.Set("Content-Type", contentType)
	}
	return w.CreatePart(h)
}

// MultipartUpload creates an io.Reader which produces an encoded
// multipart form upload from the params passed in and the passed in
//
@@ -411,10 +382,10 @@ func CreateFormFile(w *multipart.Writer, fieldname, filename, contentType string
// the int64 returned is the overhead in addition to the file contents, in case Content-Length is required
//
// NB This doesn't allow setting the content type of the attachment
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string, contentType string) (io.ReadCloser, string, int64, error) {
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) {
	bodyReader, bodyWriter := io.Pipe()
	writer := multipart.NewWriter(bodyWriter)
	formContentType := writer.FormDataContentType()
	contentType := writer.FormDataContentType()

	// Create a Multipart Writer as base for calculating the Content-Length
	buf := &bytes.Buffer{}
@@ -433,7 +404,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
		}
	}
	if in != nil {
		_, err = CreateFormFile(dummyMultipartWriter, contentName, fileName, contentType)
		_, err = dummyMultipartWriter.CreateFormFile(contentName, fileName)
		if err != nil {
			return nil, "", 0, err
		}
@@ -474,7 +445,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
	}

	if in != nil {
		part, err := CreateFormFile(writer, contentName, fileName, contentType)
		part, err := writer.CreateFormFile(contentName, fileName)
		if err != nil {
			_ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
			return
@@ -496,7 +467,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
		_ = bodyWriter.Close()
	}()

	return bodyReader, formContentType, multipartLength, nil
	return bodyReader, contentType, multipartLength, nil
}

// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
@@ -568,7 +539,7 @@ func (api *Client) callCodec(ctx context.Context, opts *Opts, request any, respo
	opts = opts.Copy()

	var overhead int64
	opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName, opts.MultipartContentType)
	opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
	if err != nil {
		return nil, err
	}
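
To illustrate the new `MultipartUpload` signature, here is a minimal caller
sketch; the endpoint URL, form parameter, and file name are placeholders:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"net/url"
	"os"

	"github.com/rclone/rclone/lib/rest"
)

func main() {
	ctx := context.Background()
	f, err := os.Open("example.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = f.Close() }()

	// Build the multipart body. Note that after this change the content
	// type of the attachment can no longer be set by the caller.
	body, contentType, overhead, err := rest.MultipartUpload(ctx, f,
		url.Values{"purpose": []string{"demo"}}, "file", "example.txt")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("multipart overhead: %d bytes", overhead)

	req, err := http.NewRequestWithContext(ctx, "POST", "https://upload.example.com/", body)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", contentType)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = resp.Body.Close() }()
	log.Println(resp.Status)
}
```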