
Compare commits


13 Commits

Author SHA1 Message Date
dougal
e51a0599a0 log: fix systemd adding extra newline - fixes #9086
This was broken in v1.71.0 by a typo.
2026-01-09 16:30:01 +00:00
Qingwei Li
530a901de3 oracleobjectstorage, sftp: eliminate unnecessary heap allocation
Move variable declarations to eliminate heap allocations, which may make
rclone slightly faster and reduce memory usage.

Fixes #9078
2026-01-09 16:10:02 +00:00
Nicolas Dessart
a64a8aad0e sftp,ftp: add http proxy authentication support
This change adds support for the `http://user:pass@host:port` syntax in the
http_proxy setting.
2026-01-08 16:31:11 +00:00
dougal
6529d2cd8f Add Drime backend
Co-Authored-By: Nick Craig-Wood <nick@craig-wood.com>
2026-01-08 12:05:37 +00:00
Nick Craig-Wood
d9895fef9d lib/rest: add opts.MultipartContentType to explicitly set Content-Type of attachments
Before this change the standard library's CreateFormFile always set it to application/octet-stream
2026-01-08 12:05:37 +00:00
dougal
8c7b7ac891 dircache: allow empty string as root parent id
This was causing an internal error with the drime backend, whose root parent
ID is an empty string. This shouldn't affect anything else.
2026-01-08 12:05:37 +00:00
Nick Craig-Wood
f814498561 docs: update sponsors 2026-01-08 12:05:30 +00:00
vupn0712
5f4e4b1a20 s3: add provider Bizfly Cloud Simple Storage
Co-authored-by: sys6101 <csvmen@gmail.com>
2026-01-06 14:56:49 +00:00
Nick Craig-Wood
28c187b9b4 docs: update sponsor logos 2025-12-31 17:04:11 +00:00
Nick Craig-Wood
e07afc4645 Add sys6101 to contributors 2025-12-31 17:04:11 +00:00
Nick Craig-Wood
08932ab92a Add darkdragon-001 to contributors 2025-12-31 17:04:11 +00:00
Nick Craig-Wood
356ee57edb Add vupn0712 to contributors 2025-12-31 17:04:11 +00:00
yuval-cloudinary
7c1660214d docs: add cloudinary to readme 2025-12-22 22:39:53 +01:00
33 changed files with 2235 additions and 61 deletions


@@ -28,16 +28,19 @@ directories to and from different cloud storage providers.
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss) - Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/) - Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos) - ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Bizfly Cloud Simple Storage [:page_facing_up:](https://rclone.org/s3/#bizflycloud)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/) - Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/) - Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph) - Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos) - China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/) - Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Cloudinary [:page_facing_up:](https://rclone.org/cloudinary/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit) - Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces) - DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage) - Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost) - Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Drime [:page_facing_up:](https://rclone.org/drime/)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/) - Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/) - Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba) - Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)


@@ -16,6 +16,7 @@ import (
_ "github.com/rclone/rclone/backend/compress" _ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/doi" _ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drime"
_ "github.com/rclone/rclone/backend/drive" _ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox" _ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier" _ "github.com/rclone/rclone/backend/fichier"

backend/drime/api/types.go (new file, 237 lines)

@@ -0,0 +1,237 @@
// Package api has type definitions for drime
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"encoding/json"
"fmt"
"time"
)
// Types of things in Item
const (
ItemTypeFolder = "folder"
)
// User information
type User struct {
Email string `json:"email"`
ID json.Number `json:"id"`
Avatar string `json:"avatar"`
ModelType string `json:"model_type"`
OwnsEntry bool `json:"owns_entry"`
EntryPermissions []any `json:"entry_permissions"`
DisplayName string `json:"display_name"`
}
// Permissions for a file
type Permissions struct {
FilesUpdate bool `json:"files.update"`
FilesCreate bool `json:"files.create"`
FilesDownload bool `json:"files.download"`
FilesDelete bool `json:"files.delete"`
}
// Item describes a folder or a file as returned by /drive/file-entries
type Item struct {
ID json.Number `json:"id"`
Name string `json:"name"`
Description any `json:"description"`
FileName string `json:"file_name"`
Mime string `json:"mime"`
Color any `json:"color"`
Backup bool `json:"backup"`
Tracked int `json:"tracked"`
FileSize int64 `json:"file_size"`
UserID json.Number `json:"user_id"`
ParentID json.Number `json:"parent_id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt any `json:"deleted_at"`
IsDeleted int `json:"is_deleted"`
Path string `json:"path"`
DiskPrefix any `json:"disk_prefix"`
Type string `json:"type"`
Extension any `json:"extension"`
FileHash any `json:"file_hash"`
Public bool `json:"public"`
Thumbnail bool `json:"thumbnail"`
MuxStatus any `json:"mux_status"`
ThumbnailURL any `json:"thumbnail_url"`
WorkspaceID int `json:"workspace_id"`
IsEncrypted int `json:"is_encrypted"`
Iv any `json:"iv"`
VaultID any `json:"vault_id"`
OwnerID int `json:"owner_id"`
Hash string `json:"hash"`
URL string `json:"url"`
Users []User `json:"users"`
Tags []any `json:"tags"`
Permissions Permissions `json:"permissions"`
}
// Listing response
type Listing struct {
CurrentPage int `json:"current_page"`
Data []Item `json:"data"`
From int `json:"from"`
LastPage int `json:"last_page"`
NextPage int `json:"next_page"`
PerPage int `json:"per_page"`
PrevPage int `json:"prev_page"`
To int `json:"to"`
Total int `json:"total"`
}
// UploadResponse for a file
type UploadResponse struct {
Status string `json:"status"`
FileEntry Item `json:"fileEntry"`
}
// CreateFolderRequest for a folder
type CreateFolderRequest struct {
Name string `json:"name"`
ParentID json.Number `json:"parentId,omitempty"`
}
// CreateFolderResponse for a folder
type CreateFolderResponse struct {
Status string `json:"status"`
Folder Item `json:"folder"`
}
// Error is returned from drime when things go wrong
type Error struct {
Message string `json:"message"`
}
// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
out := fmt.Sprintf("Error %q", e.Message)
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// DeleteRequest is the input to DELETE /file-entries
type DeleteRequest struct {
EntryIDs []string `json:"entryIds"`
DeleteForever bool `json:"deleteForever"`
}
// DeleteResponse is the response from DELETE /file-entries
type DeleteResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Errors map[string]string `json:"errors"`
}
// UpdateItemRequest describes the updates to be done to an item for PUT /file-entries/{id}/
type UpdateItemRequest struct {
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
}
// UpdateItemResponse is returned by PUT /file-entries/{id}/
type UpdateItemResponse struct {
Status string `json:"status"`
FileEntry Item `json:"fileEntry"`
}
// MoveRequest is the input to /file-entries/move
type MoveRequest struct {
EntryIDs []string `json:"entryIds"`
DestinationID string `json:"destinationId"`
}
// MoveResponse is returned by POST /file-entries/move
type MoveResponse struct {
Status string `json:"status"`
Entries []Item `json:"entries"`
}
// CopyRequest is the input to /file-entries/duplicate
type CopyRequest struct {
EntryIDs []string `json:"entryIds"`
DestinationID string `json:"destinationId"`
}
// CopyResponse is returned by POST /file-entries/duplicate
type CopyResponse struct {
Status string `json:"status"`
Entries []Item `json:"entries"`
}
// MultiPartCreateRequest is the input of POST /s3/multipart/create
type MultiPartCreateRequest struct {
Filename string `json:"filename"`
Mime string `json:"mime"`
Size int64 `json:"size"`
Extension string `json:"extension"`
ParentID json.Number `json:"parent_id"`
RelativePath string `json:"relativePath"`
}
// MultiPartCreateResponse is returned by POST /s3/multipart/create
type MultiPartCreateResponse struct {
UploadID string `json:"uploadId"`
Key string `json:"key"`
}
// CompletedPart Type for completed parts when making a multipart upload.
type CompletedPart struct {
ETag string `json:"ETag"`
PartNumber int32 `json:"PartNumber"`
}
// MultiPartGetURLsRequest is the input of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsRequest struct {
UploadID string `json:"uploadId"`
Key string `json:"key"`
PartNumbers []int `json:"partNumbers"`
}
// MultiPartGetURLsResponse is the result of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsResponse struct {
URLs []struct {
URL string `json:"url"`
PartNumber int32 `json:"partNumber"`
} `json:"urls"`
}
// MultiPartCompleteRequest is the input to POST /s3/multipart/complete
type MultiPartCompleteRequest struct {
UploadID string `json:"uploadId"`
Key string `json:"key"`
Parts []CompletedPart `json:"parts"`
}
// MultiPartCompleteResponse is the result of POST /s3/multipart/complete
type MultiPartCompleteResponse struct {
Location string `json:"location"`
}
// MultiPartEntriesRequest is the input to POST /s3/entries
type MultiPartEntriesRequest struct {
ClientMime string `json:"clientMime"`
ClientName string `json:"clientName"`
Filename string `json:"filename"`
Size int64 `json:"size"`
ClientExtension string `json:"clientExtension"`
ParentID json.Number `json:"parent_id"`
RelativePath string `json:"relativePath"`
}
// MultiPartEntriesResponse is the result of POST /s3/entries
type MultiPartEntriesResponse struct {
FileEntry Item `json:"fileEntry"`
}
// MultiPartAbort is the input of POST /s3/multipart/abort
type MultiPartAbort struct {
UploadID string `json:"uploadId"`
Key string `json:"key"`
}
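Note that the ID fields above are declared as `json.Number`, which lets an ID arrive either as a bare JSON number or as a quoted string. A minimal decoding sketch (the trimmed-down types and the response body below are invented for illustration and are not part of the backend):
```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-ins for api.Item / api.Listing, keeping only the
// fields used below. The struct tags match the definitions above.
type item struct {
	ID       json.Number `json:"id"`
	Name     string      `json:"name"`
	Type     string      `json:"type"`
	FileSize int64       `json:"file_size"`
}

type listing struct {
	CurrentPage int    `json:"current_page"`
	NextPage    int    `json:"next_page"`
	Data        []item `json:"data"`
}

func main() {
	// Invented response body: "id" arrives once as a string and once as a
	// number; json.Number accepts both without a decode error.
	body := `{"current_page":1,"next_page":0,"data":[
		{"id":"12345","name":"report.pdf","type":"pdf","file_size":2048},
		{"id":67890,"name":"Photos","type":"folder","file_size":0}]}`

	var l listing
	if err := json.Unmarshal([]byte(body), &l); err != nil {
		panic(err)
	}
	for _, it := range l.Data {
		fmt.Printf("%s (%s) id=%s size=%d\n", it.Name, it.Type, it.ID, it.FileSize)
	}
}
```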

backend/drime/drime.go (new file, 1563 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,33 @@
// Drime filesystem interface
package drime
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDrime:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -204,6 +204,12 @@ Example:
Help: `URL for HTTP CONNECT proxy Help: `URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
Supports the format http://user:pass@host:port, http://host:port, http://host.
Example:
http://myUser:myPass@proxyhostname.example.com:8000
`, `,
Advanced: true, Advanced: true,
}, { }, {
@@ -892,7 +898,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
resultchan := make(chan []*ftp.Entry, 1) resultchan := make(chan []*ftp.Entry, 1)
errchan := make(chan error, 1) errchan := make(chan error, 1)
go func() { go func(c *ftp.ServerConn) {
result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir))) result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
f.putFtpConnection(&c, err) f.putFtpConnection(&c, err)
if err != nil { if err != nil {
@@ -900,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return return
} }
resultchan <- result resultchan <- result
}() }(c)
// Wait for List for up to Timeout seconds // Wait for List for up to Timeout seconds
timer := time.NewTimer(f.ci.TimeoutOrInfinite()) timer := time.NewTimer(f.ci.TimeoutOrInfinite())
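Passing the connection into the goroutine as an argument, rather than capturing it, copies the value at the moment the goroutine starts, so a later reassignment of the outer variable can neither change what the goroutine sees nor race with it. A generic standalone sketch of the pattern (not the rclone code):
```go
package main

import "fmt"

func main() {
	done := make(chan struct{})
	conn := "connection-1"
	go func(c string) { // c is a copy of conn taken at goroutine start
		fmt.Println("goroutine uses", c)
		close(done)
	}(conn)
	conn = "connection-2" // does not affect the running goroutine
	<-done
	fmt.Println("main now has", conn)
}
```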


@@ -72,7 +72,7 @@ func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadPara
response := &UploadResult{} response := &UploadResult{}
formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName) formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName, "application/octet-stream")
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err) return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)


@@ -60,9 +60,6 @@ type StateChangeConf struct {
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) { func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target) // fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
notfoundTick := 0
targetOccurrence := 0
// Set a default for times to check for not found // Set a default for times to check for not found
if conf.NotFoundChecks == 0 { if conf.NotFoundChecks == 0 {
conf.NotFoundChecks = 20 conf.NotFoundChecks = 20
@@ -84,9 +81,11 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
// cancellation channel for the refresh loop // cancellation channel for the refresh loop
cancelCh := make(chan struct{}) cancelCh := make(chan struct{})
go func() {
notfoundTick := 0
targetOccurrence := 0
result := Result{} result := Result{}
go func() {
defer close(resCh) defer close(resCh)
select { select {
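Moving `notfoundTick` and `targetOccurrence` inside the goroutine means they are no longer captured by a closure from the enclosing scope, so escape analysis can keep them on the goroutine's stack instead of the heap (observable with `go build -gcflags=-m`). An illustrative before/after sketch of that idea, not the rclone code:
```go
package main

import "fmt"

// countCaptured declares n outside the closure; because the goroutine
// captures it by reference, n must be heap allocated.
func countCaptured(done chan<- int) {
	n := 0 // escapes to the heap: captured by the goroutine below
	go func() {
		for i := 0; i < 10; i++ {
			n++
		}
		done <- n
	}()
}

// countLocal declares n inside the goroutine, so it can stay on that
// goroutine's stack.
func countLocal(done chan<- int) {
	go func() {
		n := 0 // no capture, no heap allocation
		for i := 0; i < 10; i++ {
			n++
		}
		done <- n
	}()
}

func main() {
	done := make(chan int, 2)
	countCaptured(done)
	countLocal(done)
	fmt.Println(<-done, <-done)
}
```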


@@ -1459,7 +1459,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// opts.Body=0), so upload it as a multipart form POST with // opts.Body=0), so upload it as a multipart form POST with
// Content-Length set. // Content-Length set.
if size == 0 { if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf) formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf, opts.ContentType)
if err != nil { if err != nil {
return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err) return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
} }


@@ -1384,7 +1384,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
for i := range iVal.NumField() { for i := range iVal.NumField() {
params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String()) params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
} }
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name) formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name, "application/octet-stream")
if err != nil { if err != nil {
return fmt.Errorf("failed to make multipart upload: %w", err) return fmt.Errorf("failed to make multipart upload: %w", err)
} }


@@ -0,0 +1,15 @@
name: BizflyCloud
description: Bizfly Cloud Simple Storage
region:
hn: Ha Noi
hcm: Ho Chi Minh
endpoint:
hn.ss.bfcplatform.vn: Hanoi endpoint
hcm.ss.bfcplatform.vn: Ho Chi Minh endpoint
acl: {}
bucket_acl: true
quirks:
force_path_style: true
list_url_encode: false
use_multipart_etag: false
use_already_exists: false


@@ -688,7 +688,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
"need_idx_progress": {"true"}, "need_idx_progress": {"true"},
"replace": {"1"}, "replace": {"1"},
} }
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename)) formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename), "application/octet-stream")
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to make multipart upload: %w", err) return nil, fmt.Errorf("failed to make multipart upload: %w", err)
} }


@@ -519,6 +519,12 @@ Example:
Help: `URL for HTTP CONNECT proxy Help: `URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
Supports the format http://user:pass@host:port, http://host:port, http://host.
Example:
http://myUser:myPass@proxyhostname.example.com:8000
`, `,
Advanced: true, Advanced: true,
}, { }, {


@@ -817,7 +817,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
params.Set("filename", url.QueryEscape(name)) params.Set("filename", url.QueryEscape(name))
params.Set("parent_id", parent) params.Set("parent_id", parent)
params.Set("override-name-exist", strconv.FormatBool(true)) params.Set("override-name-exist", strconv.FormatBool(true))
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name) formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name, "application/octet-stream")
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to make multipart upload: %w", err) return nil, fmt.Errorf("failed to make multipart upload: %w", err)
} }


@@ -43,6 +43,7 @@ docs = [
"compress.md", "compress.md",
"combine.md", "combine.md",
"doi.md", "doi.md",
"drime.md"
"dropbox.md", "dropbox.md",
"filefabric.md", "filefabric.md",
"filelu.md", "filelu.md",


@@ -291,7 +291,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
} }
} }
fs.Debugf(c.what, " - accepted: %v\n", ok) fs.Debugf(c.what, " - accepted: %v\n", ok)
err = req.Reply(ok, reply) err := req.Reply(ok, reply)
if err != nil { if err != nil {
fs.Errorf(c.what, "Failed to Reply to request: %v", err) fs.Errorf(c.what, "Failed to Reply to request: %v", err)
return return


@@ -116,6 +116,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}} {{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}}
{{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}} {{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
{{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}} {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}}
{{< provider name="Backblaze B2" home="https://www.backblaze.com/cloud-storage" config="/b2/" >}} {{< provider name="Backblaze B2" home="https://www.backblaze.com/cloud-storage" config="/b2/" >}}
{{< provider name="Box" home="https://www.box.com/" config="/box/" >}} {{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
{{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}} {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
@@ -128,6 +129,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}} {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}} {{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}} {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Drime" home="https://www.drime.cloud/" config="/drime/" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}} {{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}} {{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
{{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}} {{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}


@@ -1060,3 +1060,6 @@ put them back in again. -->
- jhasse-shade <jacob@shade.inc> - jhasse-shade <jacob@shade.inc>
- vyv03354 <VYV03354@nifty.ne.jp> - vyv03354 <VYV03354@nifty.ne.jp>
- masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com> - masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>
- vupn0712 <126212736+vupn0712@users.noreply.github.com>
- darkdragon-001 <darkdragon-001@users.noreply.github.com>
- sys6101 <csvmen@gmail.com>


@@ -43,6 +43,7 @@ See the following for detailed instructions for
- [Crypt](/crypt/) - to encrypt other remotes - [Crypt](/crypt/) - to encrypt other remotes
- [DigitalOcean Spaces](/s3/#digitalocean-spaces) - [DigitalOcean Spaces](/s3/#digitalocean-spaces)
- [Digi Storage](/koofr/#digi-storage) - [Digi Storage](/koofr/#digi-storage)
- [Drime](/drime/)
- [Dropbox](/dropbox/) - [Dropbox](/dropbox/)
- [Enterprise File Fabric](/filefabric/) - [Enterprise File Fabric](/filefabric/)
- [FileLu Cloud Storage](/filelu/) - [FileLu Cloud Storage](/filelu/)

docs/content/drime.md (new file, 244 lines)

@@ -0,0 +1,244 @@
---
title: "Drime"
description: "Rclone docs for Drime"
versionIntroduced: "v1.73"
---
# {{< icon "fa fa-cloud" >}} Drime
[Drime](https://drime.cloud/) is a cloud storage and transfer service focused
on fast, resilient file delivery. It offers both free and paid tiers with
emphasis on high-speed uploads and link sharing.
To set up Drime you need to log in, navigate to Settings > Developer, and create a
token to use as an API access key. Give it a sensible name and copy the token
for use in the config.
## Configuration
Here is a run through of `rclone config` to make a remote called `remote`.
Firstly run:
```console
rclone config
```
Then follow through the interactive setup:
```text
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
Enter name for new remote.
name> remote
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
XX / Drime
\ (drime)
Storage> drime
Option access_token.
API Access token
You can get this from the web control panel.
Enter a value. Press Enter to leave empty.
access_token> YOUR_API_ACCESS_TOKEN
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Configuration complete.
Options:
- type: drime
- access_token: YOUR_API_ACCESS_TOKEN
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Once configured you can then use `rclone` like this (replace `remote` with the
name you gave your remote):
List directories and files in the top level of your Drime
```console
rclone lsf remote:
```
To copy a local directory to a Drime directory called backup
```console
rclone copy /home/source remote:backup
```
### Modification times and hashes
Drime does not support modification times or hashes.
This means that by default syncs will only use the size of the file to determine
if it needs updating.
You can use the `--update` flag, which will use the time the object was uploaded.
For many operations this is sufficient to determine if a file has changed. However,
files created with timestamps in the past will be missed by the sync when using
`--update`.
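For example, a sync that uses upload time as well as size to decide what to copy could look like this (paths are illustrative):
```console
rclone sync --update /home/source remote:backup
```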
### Restricted filename characters
In addition to the [default restricted characters set](/overview/#restricted-characters)
the following characters are also replaced:
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| \ | 0x5C | ＼ |
File names can also not start or end with the following characters. These only
get replaced if they are the first or last character in the name:
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| SP | 0x20 | ␠ |
Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.
### Root folder ID
You can set the `root_folder_id` for rclone. This is the directory
(identified by its `Folder ID`) that rclone considers to be the root
of your Drime drive.
Normally you will leave this blank and rclone will determine the
correct root to use itself and fill in the value in the config file.
However, you can set this to restrict rclone to a specific folder
hierarchy.
In order to do this you will have to find the `Folder ID` of the
directory you wish rclone to display.
You can do this with rclone
```console
$ rclone lsf -Fip --dirs-only remote:
d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/
f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/
d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/
```
The ID to use is the part before the `;` so you could set
```text
root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0
```
to restrict rclone to the `Files` directory.
<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/drime/drime.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options
Here are the Standard options specific to drime (Drime).
#### --drime-access-token
API Access token
You can get this from the web control panel.
Properties:
- Config: access_token
- Env Var: RCLONE_DRIME_ACCESS_TOKEN
- Type: string
- Required: false
### Advanced options
Here are the Advanced options specific to drime (Drime).
#### --drime-root-folder-id
ID of the root folder
Leave this blank normally, rclone will fill it in automatically.
If you want rclone to be restricted to a particular folder you can
fill it in - see the docs for more info.
Properties:
- Config: root_folder_id
- Env Var: RCLONE_DRIME_ROOT_FOLDER_ID
- Type: string
- Required: false
#### --drime-workspace-id
Account ID
Leave this blank normally, rclone will fill it in automatically.
Properties:
- Config: workspace_id
- Env Var: RCLONE_DRIME_WORKSPACE_ID
- Type: string
- Required: false
#### --drime-list-chunk
Number of items to list in each call
Properties:
- Config: list_chunk
- Env Var: RCLONE_DRIME_LIST_CHUNK
- Type: int
- Default: 1000
#### --drime-encoding
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Properties:
- Config: encoding
- Env Var: RCLONE_DRIME_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot
#### --drime-description
Description of the remote.
Properties:
- Config: description
- Env Var: RCLONE_DRIME_DESCRIPTION
- Type: string
- Required: false
<!-- autogenerated options stop -->
## Limitations
Drime only supports filenames up to 255 bytes in length, with filenames
encoded in UTF-8.


@@ -498,6 +498,12 @@ URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
Supports the format http://user:pass@host:port, http://host:port, http://host.
Example:
http://myUser:myPass@proxyhostname.example.com:8000
Properties: Properties:


@@ -23,6 +23,7 @@ Here is an overview of the major features of each cloud storage system.
| Box | SHA1 | R/W | Yes | No | - | - | | Box | SHA1 | R/W | Yes | No | - | - |
| Citrix ShareFile | MD5 | R/W | Yes | No | - | - | | Citrix ShareFile | MD5 | R/W | Yes | No | - | - |
| Cloudinary | MD5 | R | No | Yes | - | - | | Cloudinary | MD5 | R | No | Yes | - | - |
| Drime | - | - | No | No | R/W | - |
| Dropbox | DBHASH ¹ | R | Yes | No | - | - | | Dropbox | DBHASH ¹ | R | Yes | No | - | - |
| Enterprise File Fabric | - | R/W | Yes | No | R/W | - | | Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
| FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - | | FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - |
@@ -515,6 +516,7 @@ upon backend-specific capabilities.
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No | | Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes | | Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes | | Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| Drime | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | No | Yes |
| Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | | Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No | | Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No |
| Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | | Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |


@@ -18,6 +18,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}} {{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}}
{{< provider name="Cloudflare R2" home="https://blog.cloudflare.com/r2-open-beta/" config="/s3/#cloudflare-r2" >}} {{< provider name="Cloudflare R2" home="https://blog.cloudflare.com/r2-open-beta/" config="/s3/#cloudflare-r2" >}}
{{< provider name="Arvan Cloud Object Storage (AOS)" home="https://www.arvancloud.com/en/products/cloud-storage" config="/s3/#arvan-cloud" >}} {{< provider name="Arvan Cloud Object Storage (AOS)" home="https://www.arvancloud.com/en/products/cloud-storage" config="/s3/#arvan-cloud" >}}
{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}}
{{< provider name="Cubbit DS3" home="https://cubbit.io/ds3-cloud" config="/s3/#Cubbit" >}} {{< provider name="Cubbit DS3" home="https://cubbit.io/ds3-cloud" config="/s3/#Cubbit" >}}
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}} {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}} {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
@@ -4536,6 +4537,36 @@ server_side_encryption =
storage_class = storage_class =
``` ```
### BizflyCloud {#bizflycloud}
[Bizfly Cloud Simple Storage](https://bizflycloud.vn/simple-storage) is an
S3-compatible service with regions in Hanoi (HN) and Ho Chi Minh City (HCM).
Use the endpoint for your region:
- HN: `hn.ss.bfcplatform.vn`
- HCM: `hcm.ss.bfcplatform.vn`
A minimal configuration looks like this.
```ini
[bizfly]
type = s3
provider = BizflyCloud
env_auth = false
access_key_id = YOUR_ACCESS_KEY
secret_access_key = YOUR_SECRET_KEY
region = HN
endpoint = hn.ss.bfcplatform.vn
location_constraint =
acl =
server_side_encryption =
storage_class =
```
Switch `region` and `endpoint` to `HCM` and `hcm.ss.bfcplatform.vn` for Ho Chi
Minh City.
### Ceph ### Ceph
[Ceph](https://ceph.com/) is an open-source, unified, distributed [Ceph](https://ceph.com/) is an open-source, unified, distributed


@@ -1186,6 +1186,12 @@ URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb. Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
Supports the format http://user:pass@host:port, http://host:port, http://host.
Example:
http://myUser:myPass@proxyhostname.example.com:8000
Properties: Properties:


@@ -13,7 +13,7 @@ Thank you to our sponsors:
<!-- markdownlint-capture --> <!-- markdownlint-capture -->
<!-- markdownlint-disable line-length no-bare-urls --> <!-- markdownlint-disable line-length no-bare-urls -->
{{< sponsor src="/img/logos/rabata/txt_1_300x114.png" width="300" height="200" title="Visit our sponsor Rabata.io" link="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general">}} {{< sponsor src="/img/logos/rabata.svg" width="300" height="200" title="Visit our sponsor Rabata.io" link="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general">}}
{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}} {{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}} {{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}}
{{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}} {{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}}


@@ -10,40 +10,21 @@
{{end}} {{end}}
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header">Platinum Sponsor</div>
Platinum Sponsor
</div>
<div class="card-body"> <div class="card-body">
<a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_website.png"></a><br /> <a href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img src="/img/logos/rabata.svg"></a><br />
<script>
const imgs = [
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_website.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_website.png" },
{ href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_website.png" },
];
const img = imgs[Math.floor(Math.random() * imgs.length)];
document.addEventListener("DOMContentLoaded", () => {
const a = document.getElementById("platinum");
a.href = img.href;
a.querySelector("img").src = img.img;
});
</script>
</div> </div>
</div> </div>
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header">Gold Sponsor</div>
Gold Sponsor
</div>
<div class="card-body"> <div class="card-body">
<a href="https://www.idrive.com/e2/?refer=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor IDrive e2"><img src="/img/logos/idrive_e2.svg" viewBox="0 0 60 55"></a><br /> <a href="https://www.idrive.com/e2/?refer=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor IDrive e2"><img src="/img/logos/idrive_e2.svg" viewBox="0 0 60 55"></a><br />
</div> </div>
</div> </div>
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header">Gold Sponsor</div>
Gold Sponsor
</div>
<div class="card-body"> <div class="card-body">
<a href="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone" target="_blank" rel="noopener" title="Start Your Free Trial Today"><img style="max-width: 100%; height: auto;" src="/img/logos/filescom-enterprise-grade-workflows.png"></a><br /> <a href="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone" target="_blank" rel="noopener" title="Start Your Free Trial Today"><img style="max-width: 100%; height: auto;" src="/img/logos/filescom-enterprise-grade-workflows.png"></a><br />
</div> </div>
@@ -51,25 +32,19 @@
{{if .IsHome}} {{if .IsHome}}
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header">Silver Sponsor</div>
Silver Sponsor
</div>
<div class="card-body"> <div class="card-body">
<a href="https://rcloneview.com/?utm_source=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor RcloneView"><img src="/img/logos/rcloneview-banner.svg"></a><br /> <a href="https://rcloneview.com/?utm_source=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor RcloneView"><img src="/img/logos/rcloneview.svg"></a><br />
</div> </div>
</div> </div>
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header">Silver Sponsor</div>
Silver Sponsor
</div>
<div class="card-body"> <div class="card-body">
<a href="https://github.com/rclone-ui/rclone-ui" target="_blank" rel="noopener" title="Visit rclone's sponsor rclone UI"><img src="/img/logos/rcloneui.svg"></a><br /> <a href="https://github.com/rclone-ui/rclone-ui" target="_blank" rel="noopener" title="Visit rclone's sponsor rclone UI"><img src="/img/logos/rcloneui.svg"></a><br />
</div> </div>
</div> </div>
<div class="card"> <div class="card">
<div class="card-header"> <div class="card-header">Silver Sponsor</div>
Silver Sponsor
</div>
<div class="card-body"> <div class="card-body">
<a href="https://shade.inc/" target="_blank" rel="noopener" title="Visit rclone's sponsor Shade"><img style="max-width: 100%; height: auto;" src="/img/logos/shade.svg"></a><br /> <a href="https://shade.inc/" target="_blank" rel="noopener" title="Visit rclone's sponsor Shade"><img style="max-width: 100%; height: auto;" src="/img/logos/shade.svg"></a><br />
</div> </div>


@@ -66,6 +66,7 @@
<a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square fa-fw"></i> Citrix ShareFile</a> <a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square fa-fw"></i> Citrix ShareFile</a>
<a class="dropdown-item" href="/crypt/"><i class="fa fa-lock fa-fw"></i> Crypt (encrypts the others)</a> <a class="dropdown-item" href="/crypt/"><i class="fa fa-lock fa-fw"></i> Crypt (encrypts the others)</a>
<a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a> <a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a>
<a class="dropdown-item" href="/drime/"><i class="fab fa-cloud fa-fw"></i> Drime</a>
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a> <a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a> <a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a> <a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a>


@@ -16,7 +16,7 @@ func startSystemdLog(handler *OutputHandler) bool {
handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid) handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid)
handler.setFormatFlags(logFormatNoLevel) handler.setFormatFlags(logFormatNoLevel)
handler.SetOutput(func(level slog.Level, text string) { handler.SetOutput(func(level slog.Level, text string) {
_ = journal.Print(slogLevelToSystemdPriority(level), "%-6s: %s\n", level, text) _ = journal.Print(slogLevelToSystemdPriority(level), "%-6s: %s", level, text)
}) })
return true return true
} }


@@ -561,7 +561,7 @@ func TestUploadFile(t *testing.T) {
assert.NoError(t, currentFile.Close()) assert.NoError(t, currentFile.Close())
}() }()
formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName) formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName, "application/octet-stream")
require.NoError(t, err) require.NoError(t, err)
httpReq := httptest.NewRequest("POST", "/", formReader) httpReq := httptest.NewRequest("POST", "/", formReader)
@@ -587,7 +587,7 @@ func TestUploadFile(t *testing.T) {
assert.NoError(t, currentFile2.Close()) assert.NoError(t, currentFile2.Close())
}() }()
formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName) formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName, "application/octet-stream")
require.NoError(t, err) require.NoError(t, err)
httpReq = httptest.NewRequest("POST", "/", formReader) httpReq = httptest.NewRequest("POST", "/", formReader)


@@ -677,3 +677,9 @@ backends:
# with the parent backend having a different precision. # with the parent backend having a different precision.
- TestServerSideCopyOverSelf - TestServerSideCopyOverSelf
- TestServerSideMoveOverSelf - TestServerSideMoveOverSelf
- backend: "drime"
remote: "TestDrime:"
ignoretests:
# The TestBisyncRemoteLocal/check_access_filters tests fail due to duplicated objects
- cmd/bisync
fastlist: false


@@ -361,9 +361,6 @@ func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, e
} else if dc.rootID == dc.trueRootID { } else if dc.rootID == dc.trueRootID {
return "", errors.New("is root directory") return "", errors.New("is root directory")
} }
if dc.rootParentID == "" {
return "", errors.New("internal error: didn't find rootParentID")
}
return dc.rootParentID, nil return dc.rootParentID, nil
} }


@@ -3,6 +3,7 @@ package proxy
import ( import (
"bufio" "bufio"
"crypto/tls" "crypto/tls"
"encoding/base64"
"fmt" "fmt"
"net" "net"
"net/http" "net/http"
@@ -55,7 +56,13 @@ func HTTPConnectDial(network, addr string, proxyURL *url.URL, proxyDialer proxy.
} }
// send CONNECT // send CONNECT
user := proxyURL.User
if user != nil {
credential := base64.StdEncoding.EncodeToString([]byte(user.String()))
_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\nProxy-Authorization: Basic %s\r\n\r\n", addr, addr, credential)
} else {
_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr) _, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
}
if err != nil { if err != nil {
_ = conn.Close() _ = conn.Close()
return nil, fmt.Errorf("HTTP CONNECT proxy failed to send CONNECT: %q", err) return nil, fmt.Errorf("HTTP CONNECT proxy failed to send CONNECT: %q", err)
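The credential added to the CONNECT request is just the base64 encoding of the proxy URL's `user:pass` userinfo. A small standalone illustration (not the rclone code; the proxy URL and target address are made up):
```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/url"
)

func main() {
	proxyURL, err := url.Parse("http://myUser:myPass@proxyhostname.example.com:8000")
	if err != nil {
		panic(err)
	}
	// User.String() renders "myUser:myPass"; HTTP Basic auth is its base64 form.
	credential := base64.StdEncoding.EncodeToString([]byte(proxyURL.User.String()))
	fmt.Printf("CONNECT example.com:22 HTTP/1.1\r\nHost: example.com:22\r\nProxy-Authorization: Basic %s\r\n\r\n", credential)
}
```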


@@ -14,7 +14,9 @@ import (
"maps" "maps"
"mime/multipart" "mime/multipart"
"net/http" "net/http"
"net/textproto"
"net/url" "net/url"
"strings"
"sync" "sync"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
@@ -145,6 +147,7 @@ type Opts struct {
MultipartMetadataName string // ..this is used for the name of the metadata form part if set MultipartMetadataName string // ..this is used for the name of the metadata form part if set
MultipartContentName string // ..name of the parameter which is the attached file MultipartContentName string // ..name of the parameter which is the attached file
MultipartFileName string // ..name of the file for the attached file MultipartFileName string // ..name of the file for the attached file
MultipartContentType string // ..content type of the attached file
Parameters url.Values // any parameters for the final URL Parameters url.Values // any parameters for the final URL
TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding
Trailer *http.Header // set the request trailer Trailer *http.Header // set the request trailer
@@ -371,6 +374,32 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
return resp, nil return resp, nil
} }
var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
func escapeQuotes(s string) string {
return quoteEscaper.Replace(s)
}
// multipartFileContentDisposition returns the value of a Content-Disposition header
// with the provided field name and file name.
func multipartFileContentDisposition(fieldname, filename string) string {
return fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
escapeQuotes(fieldname), escapeQuotes(filename))
}
// CreateFormFile is a convenience wrapper around [Writer.CreatePart]. It creates
// a new form-data header with the provided field name and file name.
func CreateFormFile(w *multipart.Writer, fieldname, filename, contentType string) (io.Writer, error) {
h := make(textproto.MIMEHeader)
// FIXME when go1.24 is no longer supported, change to
// multipart.FileContentDisposition and remove definition above
h.Set("Content-Disposition", multipartFileContentDisposition(fieldname, filename))
if contentType != "" {
h.Set("Content-Type", contentType)
}
return w.CreatePart(h)
}
// MultipartUpload creates an io.Reader which produces an encoded a // MultipartUpload creates an io.Reader which produces an encoded a
// multipart form upload from the params passed in and the passed in // multipart form upload from the params passed in and the passed in
// //
@@ -382,10 +411,10 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
// the int64 returned is the overhead in addition to the file contents, in case Content-Length is required // the int64 returned is the overhead in addition to the file contents, in case Content-Length is required
// //
// NB This doesn't allow setting the content type of the attachment // NB This doesn't allow setting the content type of the attachment
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) { func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string, contentType string) (io.ReadCloser, string, int64, error) {
bodyReader, bodyWriter := io.Pipe() bodyReader, bodyWriter := io.Pipe()
writer := multipart.NewWriter(bodyWriter) writer := multipart.NewWriter(bodyWriter)
contentType := writer.FormDataContentType() formContentType := writer.FormDataContentType()
// Create a Multipart Writer as base for calculating the Content-Length // Create a Multipart Writer as base for calculating the Content-Length
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
@@ -404,7 +433,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
} }
} }
if in != nil { if in != nil {
_, err = dummyMultipartWriter.CreateFormFile(contentName, fileName) _, err = CreateFormFile(dummyMultipartWriter, contentName, fileName, contentType)
if err != nil { if err != nil {
return nil, "", 0, err return nil, "", 0, err
} }
@@ -445,7 +474,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
} }
if in != nil { if in != nil {
part, err := writer.CreateFormFile(contentName, fileName) part, err := CreateFormFile(writer, contentName, fileName, contentType)
if err != nil { if err != nil {
_ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err)) _ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
return return
@@ -467,7 +496,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
_ = bodyWriter.Close() _ = bodyWriter.Close()
}() }()
return bodyReader, contentType, multipartLength, nil return bodyReader, formContentType, multipartLength, nil
} }
// CallJSON runs Call and decodes the body as a JSON object into response (if not nil) // CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
@@ -539,7 +568,7 @@ func (api *Client) callCodec(ctx context.Context, opts *Opts, request any, respo
opts = opts.Copy() opts = opts.Copy()
var overhead int64 var overhead int64
opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName) opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName, opts.MultipartContentType)
if err != nil { if err != nil {
return nil, err return nil, err
} }
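The application/octet-stream default mentioned in the commit message comes from `mime/multipart`: `Writer.CreateFormFile` hard-codes that Content-Type, so choosing a different one means building the part header yourself with `CreatePart`, which is what the new `CreateFormFile` helper above does. A standard-library-only sketch of the same idea (the file name and content type are arbitrary examples):
```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/textproto"
	"strings"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	// multipart.Writer.CreateFormFile always writes
	// "Content-Type: application/octet-stream" for the part, so to pick
	// our own type we create the part header explicitly.
	h := make(textproto.MIMEHeader)
	h.Set("Content-Disposition", `form-data; name="file"; filename="hello.txt"`)
	h.Set("Content-Type", "text/plain")

	part, err := w.CreatePart(h)
	if err != nil {
		panic(err)
	}
	if _, err := part.Write([]byte("hello world\n")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	fmt.Println("request Content-Type:", w.FormDataContentType())
	fmt.Println(strings.TrimSpace(buf.String()))
}
```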