Mirror of https://github.com/rclone/rclone.git
Synced 2026-01-23 21:03:24 +00:00
Compare commits: fix-9073-m...fix-9115-f (26 commits)
Commit SHA1s:

- 8216d66d18
- 6b9f77459a
- 4b2c39c585
- dbf2499d85
- 760a134c95
- 63cfe260a2
- 3550275cd3
- b728929f44
- 9ec918f137
- 3a9c7ceeb1
- 5502c0f8ae
- d707ae7cf4
- 9bef7f0dbf
- 933bbf3ac8
- ecc5972d6f
- 07805796ab
- 189e6dbf6a
- d47e289165
- e51a0599a0
- 530a901de3
- a64a8aad0e
- 6529d2cd8f
- d9895fef9d
- 8c7b7ac891
- f814498561
- 5f4e4b1a20
@@ -28,6 +28,7 @@ directories to and from different cloud storage providers.
 - Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 - Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 - ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
+- Bizfly Cloud Simple Storage [:page_facing_up:](https://rclone.org/s3/#bizflycloud)
 - Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
 - Box [:page_facing_up:](https://rclone.org/box/)
 - Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
@@ -39,11 +40,13 @@ directories to and from different cloud storage providers.
 - DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 - Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
 - Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
+- Drime [:page_facing_up:](https://rclone.org/s3/#drime)
 - Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 - Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
 - Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
 - Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
 - FileLu [:page_facing_up:](https://rclone.org/filelu/)
+- Filen [:page_facing_up:](https://rclone.org/filen/)
 - Files.com [:page_facing_up:](https://rclone.org/filescom/)
 - FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
 - FTP [:page_facing_up:](https://rclone.org/ftp/)
@@ -16,11 +16,13 @@ import (
     _ "github.com/rclone/rclone/backend/compress"
     _ "github.com/rclone/rclone/backend/crypt"
     _ "github.com/rclone/rclone/backend/doi"
+    _ "github.com/rclone/rclone/backend/drime"
     _ "github.com/rclone/rclone/backend/drive"
     _ "github.com/rclone/rclone/backend/dropbox"
     _ "github.com/rclone/rclone/backend/fichier"
     _ "github.com/rclone/rclone/backend/filefabric"
     _ "github.com/rclone/rclone/backend/filelu"
+    _ "github.com/rclone/rclone/backend/filen"
     _ "github.com/rclone/rclone/backend/filescom"
     _ "github.com/rclone/rclone/backend/ftp"
     _ "github.com/rclone/rclone/backend/gofile"
@@ -64,7 +66,6 @@ import (
     _ "github.com/rclone/rclone/backend/swift"
     _ "github.com/rclone/rclone/backend/ulozto"
     _ "github.com/rclone/rclone/backend/union"
-    _ "github.com/rclone/rclone/backend/uptobox"
     _ "github.com/rclone/rclone/backend/webdav"
     _ "github.com/rclone/rclone/backend/yandex"
     _ "github.com/rclone/rclone/backend/zoho"
@@ -77,7 +77,7 @@ The DOI provider can be set when rclone does not automatically recognize a suppo
     Name: "doi_resolver_api_url",
     Help: `The URL of the DOI resolver API to use.

-The DOI resolver can be set for testing or for cases when the the canonical DOI resolver API cannot be used.
+The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.

 Defaults to "https://doi.org/api".`,
     Required: false,
backend/drime/api/types.go (new file, 237 lines)
@@ -0,0 +1,237 @@
// Package api has type definitions for drime
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
    "encoding/json"
    "fmt"
    "time"
)

// Types of things in Item
const (
    ItemTypeFolder = "folder"
)

// User information
type User struct {
    Email            string      `json:"email"`
    ID               json.Number `json:"id"`
    Avatar           string      `json:"avatar"`
    ModelType        string      `json:"model_type"`
    OwnsEntry        bool        `json:"owns_entry"`
    EntryPermissions []any       `json:"entry_permissions"`
    DisplayName      string      `json:"display_name"`
}

// Permissions for a file
type Permissions struct {
    FilesUpdate   bool `json:"files.update"`
    FilesCreate   bool `json:"files.create"`
    FilesDownload bool `json:"files.download"`
    FilesDelete   bool `json:"files.delete"`
}

// Item describes a folder or a file as returned by /drive/file-entries
type Item struct {
    ID           json.Number `json:"id"`
    Name         string      `json:"name"`
    Description  any         `json:"description"`
    FileName     string      `json:"file_name"`
    Mime         string      `json:"mime"`
    Color        any         `json:"color"`
    Backup       bool        `json:"backup"`
    Tracked      int         `json:"tracked"`
    FileSize     int64       `json:"file_size"`
    UserID       json.Number `json:"user_id"`
    ParentID     json.Number `json:"parent_id"`
    CreatedAt    time.Time   `json:"created_at"`
    UpdatedAt    time.Time   `json:"updated_at"`
    DeletedAt    any         `json:"deleted_at"`
    IsDeleted    int         `json:"is_deleted"`
    Path         string      `json:"path"`
    DiskPrefix   any         `json:"disk_prefix"`
    Type         string      `json:"type"`
    Extension    any         `json:"extension"`
    FileHash     any         `json:"file_hash"`
    Public       bool        `json:"public"`
    Thumbnail    bool        `json:"thumbnail"`
    MuxStatus    any         `json:"mux_status"`
    ThumbnailURL any         `json:"thumbnail_url"`
    WorkspaceID  int         `json:"workspace_id"`
    IsEncrypted  int         `json:"is_encrypted"`
    Iv           any         `json:"iv"`
    VaultID      any         `json:"vault_id"`
    OwnerID      int         `json:"owner_id"`
    Hash         string      `json:"hash"`
    URL          string      `json:"url"`
    Users        []User      `json:"users"`
    Tags         []any       `json:"tags"`
    Permissions  Permissions `json:"permissions"`
}

// Listing response
type Listing struct {
    CurrentPage int    `json:"current_page"`
    Data        []Item `json:"data"`
    From        int    `json:"from"`
    LastPage    int    `json:"last_page"`
    NextPage    int    `json:"next_page"`
    PerPage     int    `json:"per_page"`
    PrevPage    int    `json:"prev_page"`
    To          int    `json:"to"`
    Total       int    `json:"total"`
}

// UploadResponse for a file
type UploadResponse struct {
    Status    string `json:"status"`
    FileEntry Item   `json:"fileEntry"`
}

// CreateFolderRequest for a folder
type CreateFolderRequest struct {
    Name     string      `json:"name"`
    ParentID json.Number `json:"parentId,omitempty"`
}

// CreateFolderResponse for a folder
type CreateFolderResponse struct {
    Status string `json:"status"`
    Folder Item   `json:"folder"`
}

// Error is returned from drime when things go wrong
type Error struct {
    Message string `json:"message"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
    out := fmt.Sprintf("Error %q", e.Message)
    return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// DeleteRequest is the input to DELETE /file-entries
type DeleteRequest struct {
    EntryIDs      []string `json:"entryIds"`
    DeleteForever bool     `json:"deleteForever"`
}

// DeleteResponse is returned by DELETE /file-entries
type DeleteResponse struct {
    Status  string            `json:"status"`
    Message string            `json:"message"`
    Errors  map[string]string `json:"errors"`
}

// UpdateItemRequest describes the updates to be done to an item for PUT /file-entries/{id}/
type UpdateItemRequest struct {
    Name        string `json:"name,omitempty"`
    Description string `json:"description,omitempty"`
}

// UpdateItemResponse is returned by PUT /file-entries/{id}/
type UpdateItemResponse struct {
    Status    string `json:"status"`
    FileEntry Item   `json:"fileEntry"`
}

// MoveRequest is the input to /file-entries/move
type MoveRequest struct {
    EntryIDs      []string `json:"entryIds"`
    DestinationID string   `json:"destinationId"`
}

// MoveResponse is returned by POST /file-entries/move
type MoveResponse struct {
    Status  string `json:"status"`
    Entries []Item `json:"entries"`
}

// CopyRequest is the input to /file-entries/duplicate
type CopyRequest struct {
    EntryIDs      []string `json:"entryIds"`
    DestinationID string   `json:"destinationId"`
}

// CopyResponse is returned by POST /file-entries/duplicate
type CopyResponse struct {
    Status  string `json:"status"`
    Entries []Item `json:"entries"`
}

// MultiPartCreateRequest is the input of POST /s3/multipart/create
type MultiPartCreateRequest struct {
    Filename     string      `json:"filename"`
    Mime         string      `json:"mime"`
    Size         int64       `json:"size"`
    Extension    string      `json:"extension"`
    ParentID     json.Number `json:"parent_id"`
    RelativePath string      `json:"relativePath"`
}

// MultiPartCreateResponse is returned by POST /s3/multipart/create
type MultiPartCreateResponse struct {
    UploadID string `json:"uploadId"`
    Key      string `json:"key"`
}

// CompletedPart is the type for completed parts when making a multipart upload.
type CompletedPart struct {
    ETag       string `json:"ETag"`
    PartNumber int32  `json:"PartNumber"`
}

// MultiPartGetURLsRequest is the input of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsRequest struct {
    UploadID    string `json:"uploadId"`
    Key         string `json:"key"`
    PartNumbers []int  `json:"partNumbers"`
}

// MultiPartGetURLsResponse is the result of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsResponse struct {
    URLs []struct {
        URL        string `json:"url"`
        PartNumber int32  `json:"partNumber"`
    } `json:"urls"`
}

// MultiPartCompleteRequest is the input to POST /s3/multipart/complete
type MultiPartCompleteRequest struct {
    UploadID string          `json:"uploadId"`
    Key      string          `json:"key"`
    Parts    []CompletedPart `json:"parts"`
}

// MultiPartCompleteResponse is the result of POST /s3/multipart/complete
type MultiPartCompleteResponse struct {
    Location string `json:"location"`
}

// MultiPartEntriesRequest is the input to POST /s3/entries
type MultiPartEntriesRequest struct {
    ClientMime      string      `json:"clientMime"`
    ClientName      string      `json:"clientName"`
    Filename        string      `json:"filename"`
    Size            int64       `json:"size"`
    ClientExtension string      `json:"clientExtension"`
    ParentID        json.Number `json:"parent_id"`
    RelativePath    string      `json:"relativePath"`
}

// MultiPartEntriesResponse is the result of POST /s3/entries
type MultiPartEntriesResponse struct {
    FileEntry Item `json:"fileEntry"`
}

// MultiPartAbort is the input of POST /s3/multipart/abort
type MultiPartAbort struct {
    UploadID string `json:"uploadId"`
    Key      string `json:"key"`
}
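Taken together, the `MultiPart*` types describe a four-step upload handshake: create the upload, sign per-part URLs, upload and complete, then register the entry. The following is a minimal sketch of that sequence inferred from the type comments above, not taken from the backend implementation; `callJSON` and `putPart` are hypothetical helpers standing in for the backend's actual rest-client plumbing.

```go
package api

import "context"

// Hypothetical transport helpers; signatures are illustrative only.
var (
    callJSON func(ctx context.Context, path string, req, resp any) error
    putPart  func(ctx context.Context, url string, body []byte) (etag string, err error)
)

// uploadMultipart sketches the handshake implied by the MultiPart* types.
func uploadMultipart(ctx context.Context, name string, size int64, parts [][]byte) (*Item, error) {
    // 1. Create the multipart upload, getting an upload ID and object key.
    var created MultiPartCreateResponse
    if err := callJSON(ctx, "/s3/multipart/create", MultiPartCreateRequest{Filename: name, Size: size}, &created); err != nil {
        return nil, err
    }

    // 2. Ask for a presigned URL for each (1-based) part number.
    numbers := make([]int, len(parts))
    for i := range parts {
        numbers[i] = i + 1
    }
    var urls MultiPartGetURLsResponse
    if err := callJSON(ctx, "/s3/multipart/batch-sign-part-urls", MultiPartGetURLsRequest{UploadID: created.UploadID, Key: created.Key, PartNumbers: numbers}, &urls); err != nil {
        return nil, err
    }

    // 3. PUT each part to its signed URL, then complete with the ETags.
    completed := make([]CompletedPart, 0, len(parts))
    for _, u := range urls.URLs {
        etag, err := putPart(ctx, u.URL, parts[int(u.PartNumber)-1])
        if err != nil {
            return nil, err
        }
        completed = append(completed, CompletedPart{ETag: etag, PartNumber: u.PartNumber})
    }
    var done MultiPartCompleteResponse
    if err := callJSON(ctx, "/s3/multipart/complete", MultiPartCompleteRequest{UploadID: created.UploadID, Key: created.Key, Parts: completed}, &done); err != nil {
        return nil, err
    }

    // 4. Register the finished object so it shows up as a file entry.
    var entry MultiPartEntriesResponse
    if err := callJSON(ctx, "/s3/entries", MultiPartEntriesRequest{Filename: name, Size: size}, &entry); err != nil {
        return nil, err
    }
    return &entry.FileEntry, nil
}
```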
backend/drime/drime.go (new file, 1563 lines)
File diff suppressed because it is too large
backend/drime/drime_test.go (new file, 33 lines)
@@ -0,0 +1,33 @@
// Drime filesystem interface
package drime

import (
    "testing"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDrime:",
        NilObject:  (*Object)(nil),
        ChunkedUpload: fstests.ChunkedUploadConfig{
            MinChunkSize: minChunkSize,
        },
    })
}

// SetUploadChunkSize exposes the chunk size setter for the integration tests
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadChunkSize(cs)
}

// SetUploadCutoff exposes the upload cutoff setter for the integration tests
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
    return f.setUploadCutoff(cs)
}

// Check the interfaces are satisfied
var (
    _ fstests.SetUploadChunkSizer = (*Fs)(nil)
    _ fstests.SetUploadCutoffer   = (*Fs)(nil)
)
backend/filen/filen.go (new file, 1178 lines)
File diff suppressed because it is too large
backend/filen/filen_test.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// Test Filen filesystem interface
package filen

import (
    "testing"

    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFilen:",
        NilObject:  (*Object)(nil),
    })
}
@@ -204,6 +204,12 @@ Example:
    Help: `URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.

Supports the format http://user:pass@host:port, http://host:port, http://host.

Example:

    http://myUser:myPass@proxyhostname.example.com:8000
`,
    Advanced: true,
}, {
@@ -892,7 +898,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e

 	resultchan := make(chan []*ftp.Entry, 1)
 	errchan := make(chan error, 1)
-	go func() {
+	go func(c *ftp.ServerConn) {
 		result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
 		f.putFtpConnection(&c, err)
 		if err != nil {
@@ -900,7 +906,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			return
 		}
 		resultchan <- result
-	}()
+	}(c)

 	// Wait for List for up to Timeout seconds
 	timer := time.NewTimer(f.ci.TimeoutOrInfinite())
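The fix above passes the FTP connection into the goroutine as a parameter instead of capturing `c` from the enclosing scope, so the goroutine's view of the connection cannot change underneath it. A standalone sketch of the pattern (types and names here are illustrative, not from the rclone source):

```go
package main

import (
    "fmt"
    "time"
)

type conn struct{ id int }

func main() {
    c := &conn{id: 1}

    // Capturing c from the enclosing scope: if the outer code later
    // reassigns c (e.g. after returning the connection to a pool),
    // this goroutine races on the shared variable; `go run -race`
    // reports it.
    go func() { fmt.Println("captured:", c.id) }()

    // Passing c as a parameter: the goroutine receives its own copy
    // of the pointer, fixed at the moment the goroutine starts.
    go func(c *conn) { fmt.Println("parameter:", c.id) }(c)

    c = &conn{id: 2} // only the capturing goroutine can observe this

    time.Sleep(100 * time.Millisecond) // crude wait, for the demo only
}
```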
@@ -72,7 +72,7 @@ func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadPara

 	response := &UploadResult{}

-	formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
+	formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName, "application/octet-stream")

 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)
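This is one of several call sites in this comparison updated for `rest.MultipartUpload` gaining an explicit content-type argument for the uploaded part; the same pattern recurs in the seafile, form-upload, and zero-length-file hunks below. A minimal sketch of a caller under the new signature; the endpoint, field names, and parameters are illustrative, not from any particular backend:

```go
package example

import (
    "context"
    "fmt"
    "io"
    "net/url"

    "github.com/rclone/rclone/lib/rest"
)

// uploadSketch demonstrates the new six-argument call shape.
func uploadSketch(ctx context.Context, srv *rest.Client, in io.Reader) error {
    params := url.Values{}
    params.Set("parent_id", "0")
    formReader, contentType, overhead, err := rest.MultipartUpload(
        ctx, in, params, "file", "example.txt", "application/octet-stream")
    if err != nil {
        return fmt.Errorf("failed to make multipart upload: %w", err)
    }
    _ = overhead // bytes added by the multipart framing, for Content-Length accounting
    opts := rest.Opts{
        Method:      "POST",
        Path:        "/upload",
        Body:        formReader,
        ContentType: contentType, // "multipart/form-data; boundary=..."
    }
    _, err = srv.Call(ctx, &opts)
    return err
}
```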
@@ -403,7 +403,7 @@ This is why this flag is not set as the default.

 As a rule of thumb if nearly all of your data is under rclone's root
 directory (the |root/directory| in |onedrive:root/directory|) then
-using this flag will be be a big performance win. If your data is
+using this flag will be a big performance win. If your data is
 mostly not under the root then using this flag will be a big
 performance loss.
|
||||
@@ -60,9 +60,6 @@ type StateChangeConf struct {
|
||||
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
|
||||
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
|
||||
|
||||
notfoundTick := 0
|
||||
targetOccurrence := 0
|
||||
|
||||
// Set a default for times to check for not found
|
||||
if conf.NotFoundChecks == 0 {
|
||||
conf.NotFoundChecks = 20
|
||||
@@ -84,9 +81,11 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
|
||||
// cancellation channel for the refresh loop
|
||||
cancelCh := make(chan struct{})
|
||||
|
||||
result := Result{}
|
||||
|
||||
go func() {
|
||||
notfoundTick := 0
|
||||
targetOccurrence := 0
|
||||
result := Result{}
|
||||
|
||||
defer close(resCh)
|
||||
|
||||
select {
|
||||
|
||||
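The hunks above move `notfoundTick`, `targetOccurrence`, and `result` inside the refresh goroutine, so the wait-loop state is owned by a single goroutine instead of being shared across the function/goroutine boundary. A reduced sketch of the shape of the change (names beyond those in the diff are illustrative):

```go
package example

// Before: result lives in the outer scope, so the outer function and
// the goroutine share it and can race.
func waitBefore(refresh func() bool) int {
    result := 0
    resCh := make(chan int, 1)
    go func() {
        defer close(resCh)
        for !refresh() {
            result++ // shared with the enclosing scope
        }
        resCh <- result
    }()
    return <-resCh
}

// After: result is declared inside the goroutine, so exactly one
// goroutine ever reads or writes it; the value crosses over on the
// channel instead.
func waitAfter(refresh func() bool) int {
    resCh := make(chan int, 1)
    go func() {
        result := 0 // owned by this goroutine alone
        defer close(resCh)
        for !refresh() {
            result++
        }
        resCh <- result
    }()
    return <-resCh
}
```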
@@ -1459,7 +1459,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// opts.Body=0), so upload it as a multipart form POST with
 	// Content-Length set.
 	if size == 0 {
-		formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
+		formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf, opts.ContentType)
 		if err != nil {
 			return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
 		}
@@ -1384,7 +1384,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
 	for i := range iVal.NumField() {
 		params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
 	}
-	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
+	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name, "application/octet-stream")
 	if err != nil {
 		return fmt.Errorf("failed to make multipart upload: %w", err)
 	}
backend/s3/provider/BizflyCloud.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
name: BizflyCloud
description: Bizfly Cloud Simple Storage
region:
  hn: Ha Noi
  hcm: Ho Chi Minh
endpoint:
  hn.ss.bfcplatform.vn: Hanoi endpoint
  hcm.ss.bfcplatform.vn: Ho Chi Minh endpoint
acl: {}
bucket_acl: true
quirks:
  force_path_style: true
  list_url_encode: false
  use_multipart_etag: false
  use_already_exists: false
@@ -688,7 +688,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
 		"need_idx_progress": {"true"},
 		"replace":           {"1"},
 	}
-	formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
+	formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename), "application/octet-stream")
 	if err != nil {
 		return nil, fmt.Errorf("failed to make multipart upload: %w", err)
 	}
@@ -519,6 +519,12 @@ Example:
    Help: `URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.

Supports the format http://user:pass@host:port, http://host:port, http://host.

Example:

    http://myUser:myPass@proxyhostname.example.com:8000
`,
    Advanced: true,
}, {
@@ -919,15 +925,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt.Port = "22"
 	}

-	// get proxy URL if set
-	if opt.HTTPProxy != "" {
-		proxyURL, err := url.Parse(opt.HTTPProxy)
-		if err != nil {
-			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
-		}
-		f.proxyURL = proxyURL
-	}
-
 	// Set up sshConfig here from opt
 	// **NB** everything else should be setup in NewFsWithConnection
 	sshConfig := &ssh.ClientConfig{
 		User: opt.User,
 		Auth: []ssh.AuthMethod{},
@@ -1175,11 +1174,21 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 	f.mkdirLock = newStringLock()
 	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
 	f.savedpswd = ""

 	// set the pool drainer timer going
 	if f.opt.IdleTimeout > 0 {
 		f.drain = time.AfterFunc(time.Duration(f.opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
 	}

+	// get proxy URL if set
+	if opt.HTTPProxy != "" {
+		proxyURL, err := url.Parse(opt.HTTPProxy)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
+		}
+		f.proxyURL = proxyURL
+	}
+
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 		SlowHash:                true,
@@ -1249,7 +1258,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 		fs.Debugf(f, "Failed to resolve path using RealPath: %v", err)
 		cwd, err := c.sftpClient.Getwd()
 		if err != nil {
-			fs.Debugf(f, "Failed to to read current directory - using relative paths: %v", err)
+			fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
 		} else {
 			f.absRoot = path.Join(cwd, f.root)
 			fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot)
|
||||
@@ -54,7 +54,7 @@ var SharedOptions = []fs.Option{{
|
||||
Name: "chunk_size",
|
||||
Help: strings.ReplaceAll(`Above this size files will be chunked.
|
||||
|
||||
Above this size files will be chunked into a a |`+segmentsContainerSuffix+`| container
|
||||
Above this size files will be chunked into a |`+segmentsContainerSuffix+`| container
|
||||
or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option
|
||||
for more info). Default for this is 5 GiB which is its maximum value, which
|
||||
means only files above this size will be chunked.
|
||||
|
||||
@@ -1,171 +0,0 @@
// Package api provides types used by the Uptobox API.
package api

import "fmt"

// Error contains the error code and message returned by the API
type Error struct {
    Success    bool   `json:"success,omitempty"`
    StatusCode int    `json:"statusCode,omitempty"`
    Message    string `json:"message,omitempty"`
    Data       string `json:"data,omitempty"`
}

// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
    out := fmt.Sprintf("api error %d", e.StatusCode)
    if e.Message != "" {
        out += ": " + e.Message
    }
    if e.Data != "" {
        out += ": " + e.Data
    }
    return out
}

// FolderEntry represents a Uptobox subfolder when listing folder contents
type FolderEntry struct {
    FolderID    uint64 `json:"fld_id"`
    Description string `json:"fld_descr"`
    Password    string `json:"fld_password"`
    FullPath    string `json:"fullPath"`
    Path        string `json:"fld_name"`
    Name        string `json:"name"`
    Hash        string `json:"hash"`
}

// FolderInfo represents the current folder when listing folder contents
type FolderInfo struct {
    FolderID      uint64 `json:"fld_id"`
    Hash          string `json:"hash"`
    FileCount     uint64 `json:"fileCount"`
    TotalFileSize int64  `json:"totalFileSize"`
}

// FileInfo represents a file when listing folder contents
type FileInfo struct {
    Name         string `json:"file_name"`
    Description  string `json:"file_descr"`
    Created      string `json:"file_created"`
    Size         int64  `json:"file_size"`
    Downloads    uint64 `json:"file_downloads"`
    Code         string `json:"file_code"`
    Password     string `json:"file_password"`
    Public       int    `json:"file_public"`
    LastDownload string `json:"file_last_download"`
    ID           uint64 `json:"id"`
}

// ReadMetadataResponse is the response when listing folder contents
type ReadMetadataResponse struct {
    StatusCode int    `json:"statusCode"`
    Message    string `json:"message"`
    Data       struct {
        CurrentFolder  FolderInfo    `json:"currentFolder"`
        Folders        []FolderEntry `json:"folders"`
        Files          []FileInfo    `json:"files"`
        PageCount      int           `json:"pageCount"`
        TotalFileCount int           `json:"totalFileCount"`
        TotalFileSize  int64         `json:"totalFileSize"`
    } `json:"data"`
}

// UploadInfo is the response when initiating an upload
type UploadInfo struct {
    StatusCode int    `json:"statusCode"`
    Message    string `json:"message"`
    Data       struct {
        UploadLink string `json:"uploadLink"`
        MaxUpload  string `json:"maxUpload"`
    } `json:"data"`
}

// UploadResponse is the response to a successful upload
type UploadResponse struct {
    Files []struct {
        Name      string `json:"name"`
        Size      int64  `json:"size"`
        URL       string `json:"url"`
        DeleteURL string `json:"deleteUrl"`
    } `json:"files"`
}

// UpdateResponse is a generic response to various actions on files (rename/copy/move)
type UpdateResponse struct {
    Message    string `json:"message"`
    StatusCode int    `json:"statusCode"`
}

// Download is the response when requesting a download link
type Download struct {
    StatusCode int    `json:"statusCode"`
    Message    string `json:"message"`
    Data       struct {
        DownloadLink string `json:"dlLink"`
    } `json:"data"`
}

// MetadataRequestOptions represents all the options when listing folder contents
type MetadataRequestOptions struct {
    Limit       uint64
    Offset      uint64
    SearchField string
    Search      string
}

// CreateFolderRequest is used for creating a folder
type CreateFolderRequest struct {
    Token string `json:"token"`
    Path  string `json:"path"`
    Name  string `json:"name"`
}

// DeleteFolderRequest is used for deleting a folder
type DeleteFolderRequest struct {
    Token    string `json:"token"`
    FolderID uint64 `json:"fld_id"`
}

// CopyMoveFileRequest is used for moving/copying a file
type CopyMoveFileRequest struct {
    Token               string `json:"token"`
    FileCodes           string `json:"file_codes"`
    DestinationFolderID uint64 `json:"destination_fld_id"`
    Action              string `json:"action"`
}

// MoveFolderRequest is used for moving a folder
type MoveFolderRequest struct {
    Token               string `json:"token"`
    FolderID            uint64 `json:"fld_id"`
    DestinationFolderID uint64 `json:"destination_fld_id"`
    Action              string `json:"action"`
}

// RenameFolderRequest is used for renaming a folder
type RenameFolderRequest struct {
    Token    string `json:"token"`
    FolderID uint64 `json:"fld_id"`
    NewName  string `json:"new_name"`
}

// UpdateFileInformation is used for renaming a file
type UpdateFileInformation struct {
    Token       string `json:"token"`
    FileCode    string `json:"file_code"`
    NewName     string `json:"new_name,omitempty"`
    Description string `json:"description,omitempty"`
    Password    string `json:"password,omitempty"`
    Public      string `json:"public,omitempty"`
}

// RemoveFileRequest is used for deleting a file
type RemoveFileRequest struct {
    Token     string `json:"token"`
    FileCodes string `json:"file_codes"`
}

// Token represents the authentication token
type Token struct {
    Token string `json:"token"`
}
File diff suppressed because it is too large
@@ -1,21 +0,0 @@
// Test Uptobox filesystem interface
package uptobox_test

import (
    "testing"

    "github.com/rclone/rclone/backend/uptobox"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    if *fstest.RemoteName == "" {
        *fstest.RemoteName = "TestUptobox:"
    }
    fstests.Run(t, &fstests.Opt{
        RemoteName: *fstest.RemoteName,
        NilObject:  (*uptobox.Object)(nil),
    })
}
@@ -817,7 +817,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
 	params.Set("filename", url.QueryEscape(name))
 	params.Set("parent_id", parent)
 	params.Set("override-name-exist", strconv.FormatBool(true))
-	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
+	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name, "application/octet-stream")
 	if err != nil {
 		return nil, fmt.Errorf("failed to make multipart upload: %w", err)
 	}
@@ -43,9 +43,11 @@ docs = [
 	"compress.md",
 	"combine.md",
 	"doi.md",
+	"drime.md",
 	"dropbox.md",
 	"filefabric.md",
 	"filelu.md",
+	"filen.md",
 	"filescom.md",
 	"ftp.md",
 	"gofile.md",
@@ -89,7 +91,6 @@ docs = [
 	"storj.md",
 	"sugarsync.md",
 	"ulozto.md",
-	"uptobox.md",
 	"union.md",
 	"webdav.md",
 	"yandex.md",
@@ -341,7 +341,7 @@ func (h *testState) preconfigureServer() {
 // The `\\?\` prefix tells Windows APIs to pass strings unmodified to the
 // filesystem without additional parsing [1]. Our workaround is roughly to add
 // the prefix to whichever parameter doesn't have it (when the OS is Windows).
-// I'm not sure this generalizes, but it works for the the kinds of inputs we're
+// I'm not sure this generalizes, but it works for the kinds of inputs we're
 // throwing at it.
 //
 // [1]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
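For readers unfamiliar with the prefix, a minimal sketch of the workaround the comment describes; the helper name is hypothetical, not from the rclone source:

```go
package example

import (
    "runtime"
    "strings"
)

// addExtendedPrefix prepends the `\\?\` extended-length prefix on
// Windows (for paths that don't already carry it) so the string is
// handed to the filesystem without additional Win32 path parsing.
func addExtendedPrefix(path string) string {
    if runtime.GOOS != "windows" || strings.HasPrefix(path, `\\?\`) {
        return path
    }
    return `\\?\` + path
}
```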
@@ -97,7 +97,7 @@ with the following options:
 - If ` + "`--files-only`" + ` is specified then files will be returned only,
   no directories.

-If ` + "`--stat`" + ` is set then the the output is not an array of items,
+If ` + "`--stat`" + ` is set then the output is not an array of items,
 but instead a single JSON blob will be returned about the item pointed to.
 This will return an error if the item isn't found, however on bucket based
 backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
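For instance, pointing `--stat` at a single file returns one JSON object rather than an array (field values below are illustrative):

```console
$ rclone lsjson --stat remote:path/to/file.txt
{"Path":"path/to/file.txt","Name":"file.txt","Size":42,"ModTime":"2017-05-31T16:15:57Z","IsDir":false}
```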
@@ -71,7 +71,7 @@ rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=m
 rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
 ` + "```" + `

-The vfsOpt are as described in options/get and can be seen in the the
+The vfsOpt are as described in options/get and can be seen in the
 "vfs" section when running and the mountOpt can be seen in the "mount" section:

 ` + "```console" + `
@@ -34,7 +34,7 @@ argument by passing a hyphen as an argument. This will use the first
 line of STDIN as the password not including the trailing newline.

 ` + "```console" + `
-echo "secretpassword" | rclone obscure -
+echo 'secretpassword' | rclone obscure -
 ` + "```" + `

 If there is no data on STDIN to read, rclone obscure will default to
@@ -291,7 +291,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
 			}
 		}
 		fs.Debugf(c.what, " - accepted: %v\n", ok)
-		err = req.Reply(ok, reply)
+		err := req.Reply(ok, reply)
 		if err != nil {
 			fs.Errorf(c.what, "Failed to Reply to request: %v", err)
 			return
@@ -116,6 +116,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}}
 {{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
 {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
+{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}}
 {{< provider name="Backblaze B2" home="https://www.backblaze.com/cloud-storage" config="/b2/" >}}
 {{< provider name="Box" home="https://www.box.com/" config="/box/" >}}
 {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
@@ -128,12 +129,14 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
 {{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
 {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
+{{< provider name="Drime" home="https://www.drime.cloud/" config="/drime/" >}}
 {{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
 {{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
 {{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}
 {{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}}
 {{< provider name="FileLu Cloud Storage" home="https://filelu.com/" config="/filelu/" >}}
 {{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}}
+{{< provider name="Filen" home="https://www.filen.io/" config="/filen/" >}}
 {{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}}
 {{< provider name="FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
 {{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
@@ -212,7 +215,6 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
 {{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
 {{< provider name="Uloz.to" home="https://uloz.to" config="/ulozto/" >}}
-{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
 {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
 {{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
 {{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}
@@ -1063,3 +1063,7 @@ put them back in again. -->
 - vupn0712 <126212736+vupn0712@users.noreply.github.com>
 - darkdragon-001 <darkdragon-001@users.noreply.github.com>
 - sys6101 <csvmen@gmail.com>
+- Nicolas Dessart <nds@outsight.tech>
+- Qingwei Li <332664203@qq.com>
+- yy <yhymmt37@gmail.com>
+- Marc-Philip <marc-philip.werner@sap.com>
@@ -1015,10 +1015,6 @@ rclone [flags]
       --union-search-policy string    Policy to choose upstream on SEARCH category (default "ff")
       --union-upstreams string        List of space separated upstreams
   -u, --update                        Skip files that are newer on the destination
-      --uptobox-access-token string   Your access token
-      --uptobox-description string    Description of the remote
-      --uptobox-encoding Encoding     The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
-      --uptobox-private               Set to make uploaded files private
       --use-cookies                   Enable session cookiejar
       --use-json-log                  Use json log format
       --use-mmap                      Use mmap allocator (see docs)
@@ -43,9 +43,11 @@ See the following for detailed instructions for
 - [Crypt](/crypt/) - to encrypt other remotes
 - [DigitalOcean Spaces](/s3/#digitalocean-spaces)
 - [Digi Storage](/koofr/#digi-storage)
+- [Drime](/drime/)
 - [Dropbox](/dropbox/)
 - [Enterprise File Fabric](/filefabric/)
 - [FileLu Cloud Storage](/filelu/)
+- [Filen](/filen/)
 - [Files.com](/filescom/)
 - [FTP](/ftp/)
 - [Gofile](/gofile/)
@@ -89,7 +91,6 @@ See the following for detailed instructions for
 - [SugarSync](/sugarsync/)
 - [Union](/union/)
 - [Uloz.to](/ulozto/)
-- [Uptobox](/uptobox/)
 - [WebDAV](/webdav/)
 - [Yandex Disk](/yandex/)
 - [Zoho WorkDrive](/zoho/)
docs/content/drime.md (new file, 244 lines)
@@ -0,0 +1,244 @@
---
title: "Drime"
description: "Rclone docs for Drime"
versionIntroduced: "v1.73"
---

# {{< icon "fa fa-cloud" >}} Drime

[Drime](https://drime.cloud/) is a cloud storage and transfer service focused
on fast, resilient file delivery. It offers both free and paid tiers with
emphasis on high-speed uploads and link sharing.

To set up Drime you need to log in, navigate to Settings > Developer, and
create a token to use as an API access key. Give it a sensible name and copy
the token for use in the config.

## Configuration

Here is a run through of `rclone config` to make a remote called `remote`.

Firstly run:

```console
rclone config
```

Then follow through the interactive setup:

```text
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> remote

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
XX / Drime
   \ (drime)
Storage> drime

Option access_token.
API Access token
You can get this from the web control panel.
Enter a value. Press Enter to leave empty.
access_token> YOUR_API_ACCESS_TOKEN

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: drime
- access_token: YOUR_API_ACCESS_TOKEN
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

Once configured you can then use `rclone` like this (replace `remote` with the
name you gave your remote):

List directories and files in the top level of your Drime

```console
rclone lsf remote:
```

To copy a local directory to a Drime directory called backup

```console
rclone copy /home/source remote:backup
```

### Modification times and hashes

Drime does not support modification times or hashes.

This means that by default syncs will only use the size of the file to
determine if it needs updating.

You can use the `--update` flag which will use the time the object was
uploaded. For many operations this is sufficient to determine if it has
changed. However files created with timestamps in the past will be missed by
the sync if using `--update`.
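For example, to let sync compare against upload times in place of the missing modification times:

```console
rclone sync --update /home/source remote:backup
```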
### Restricted filename characters

In addition to the [default restricted characters set](/overview/#restricted-characters)
the following characters are also replaced:

| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| \         | 0x5C  | ＼           |

File names can also not start or end with the following characters. These only
get replaced if they are the first or last character in the name:

| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| SP        | 0x20  | ␠           |

Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.

### Root folder ID

You can set the `root_folder_id` for rclone. This is the directory
(identified by its `Folder ID`) that rclone considers to be the root
of your Drime drive.

Normally you will leave this blank and rclone will determine the
correct root to use itself and fill in the value in the config file.

However you can set this to restrict rclone to a specific folder
hierarchy.

In order to do this you will have to find the `Folder ID` of the
directory you wish rclone to display.

You can do this with rclone:

```console
$ rclone lsf -Fip --dirs-only remote:
d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/
f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/
d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/
```

The ID to use is the part before the `;`, so you could set

```text
root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0
```

to restrict rclone to the `Files` directory.

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/drime/drime.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

Here are the Standard options specific to drime (Drime).

#### --drime-access-token

API Access token

You can get this from the web control panel.

Properties:

- Config: access_token
- Env Var: RCLONE_DRIME_ACCESS_TOKEN
- Type: string
- Required: false

### Advanced options

Here are the Advanced options specific to drime (Drime).

#### --drime-root-folder-id

ID of the root folder

Leave this blank normally, rclone will fill it in automatically.

If you want rclone to be restricted to a particular folder you can
fill it in - see the docs for more info.

Properties:

- Config: root_folder_id
- Env Var: RCLONE_DRIME_ROOT_FOLDER_ID
- Type: string
- Required: false

#### --drime-workspace-id

Account ID

Leave this blank normally, rclone will fill it in automatically.

Properties:

- Config: workspace_id
- Env Var: RCLONE_DRIME_WORKSPACE_ID
- Type: string
- Required: false

#### --drime-list-chunk

Number of items to list in each call

Properties:

- Config: list_chunk
- Env Var: RCLONE_DRIME_LIST_CHUNK
- Type: int
- Default: 1000

#### --drime-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_DRIME_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot

#### --drime-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_DRIME_DESCRIPTION
- Type: string
- Required: false

<!-- autogenerated options stop -->

## Limitations

Drime only supports filenames up to 255 bytes in length, where filenames are
encoded in UTF-8.
@@ -316,3 +316,47 @@ back again when transferring to a different storage system where the
original characters are supported. When the same Unicode characters
are intentionally used in file names, this replacement strategy leads
to unwanted renames. Read more under section [caveats](/overview/#restricted-filenames-caveats).

### Why does rclone fail to connect over TLS but another client works?

If you see TLS handshake failures (or packet captures show the server
rejecting all offered ciphers), the server/proxy may only support
legacy TLS cipher suites (for example RSA key-exchange ciphers
such as `RSA_WITH_AES_256_CBC_SHA256`, or old 3DES ciphers). Recent Go
versions (which rclone is built with) have **removed insecure ciphers
from the default list**, so rclone may refuse to negotiate them even
if other tools still do.

If you can't update/reconfigure the server/proxy to support modern TLS
(TLS 1.2/1.3) and ECDHE-based cipher suites you can re-enable legacy
ciphers via `GODEBUG`:

- Windows (cmd.exe):

  ```bat
  set GODEBUG=tlsrsakex=1
  rclone copy ...
  ```

- Windows (PowerShell):

  ```powershell
  $env:GODEBUG="tlsrsakex=1"
  rclone copy ...
  ```

- Linux/macOS:

  ```sh
  GODEBUG=tlsrsakex=1 rclone copy ...
  ```

If the server only supports 3DES, try:

```sh
GODEBUG=tls3des=1 rclone ...
```

This applies to **any rclone feature using TLS** (HTTPS, FTPS, WebDAV
over TLS, proxies with TLS interception, etc.). Use these workarounds
only long enough to get the server/proxy updated.
docs/content/filen.md (new file, 244 lines)
@@ -0,0 +1,244 @@
---
title: "Filen"
description: "Rclone docs for Filen"
versionIntroduced: "1.73"
---

# {{< icon "fa fa-solid fa-f" >}} Filen

## Configuration

The initial setup for Filen requires that you get an API key for your account;
currently this is only possible using the [Filen CLI](https://github.com/FilenCloudDienste/filen-cli).
This means you must first download the CLI, log in, and then run the
`export-api-key` command.

Here is an example of how to make a remote called `FilenRemote`. First run:

    rclone config

This will guide you through an interactive setup process:

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

name> FilenRemote

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / Filen
   \ "filen"
[snip]
Storage> filen

Option Email.
The email of your Filen account
Enter a value.
Email> youremail@provider.com

Option Password.
The password of your Filen account
Choose an alternative below.
y) Yes, type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:

Option API Key.
An API Key for your Filen account
Get this using the Filen CLI export-api-key command
You can download the Filen CLI from https://github.com/FilenCloudDienste/filen-cli
Choose an alternative below.
y) Yes, type in my own password
g) Generate random password
y/g> y
Enter the password:
password:
Confirm the password:
password:

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: filen
- Email: youremail@provider.com
- Password: *** ENCRYPTED ***
- API Key: *** ENCRYPTED ***
Keep this "FilenRemote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

### Modification times and hashes

Modification times are fully supported for files; for directories, only the
creation time matters.

Filen supports Blake3 hashes.

### Restricted filename characters

Invalid UTF-8 bytes will be [replaced](/overview/#invalid-utf8).

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/filen/filen.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

Here are the Standard options specific to filen (Filen).

#### --filen-email

Email of your Filen account

Properties:

- Config: email
- Env Var: RCLONE_FILEN_EMAIL
- Type: string
- Required: true

#### --filen-password

Password of your Filen account

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: password
- Env Var: RCLONE_FILEN_PASSWORD
- Type: string
- Required: true

#### --filen-api-key

API Key for your Filen account

Get this using the Filen CLI export-api-key command
You can download the Filen CLI from https://github.com/FilenCloudDienste/filen-cli

**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).

Properties:

- Config: api_key
- Env Var: RCLONE_FILEN_API_KEY
- Type: string
- Required: true

### Advanced options

Here are the Advanced options specific to filen (Filen).

#### --filen-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_FILEN_ENCODING
- Type: Encoding
- Default: Slash,Del,Ctl,InvalidUtf8,Dot

#### --filen-upload-concurrency

Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently for multipart uploads.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--filen-upload-concurrency" chunks stored at once
in memory.

If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.

Properties:

- Config: upload_concurrency
- Env Var: RCLONE_FILEN_UPLOAD_CONCURRENCY
- Type: int
- Default: 16
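For example, with the default `--transfers 4` this allows up to 4 × 16 = 64 chunks buffered at once; the total memory used is that count multiplied by the chunk size Filen uses, so lower one of the two factors if memory is tight.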
#### --filen-master-keys

Master Keys (internal use only)

Properties:

- Config: master_keys
- Env Var: RCLONE_FILEN_MASTER_KEYS
- Type: string
- Required: false

#### --filen-private-key

Private RSA Key (internal use only)

Properties:

- Config: private_key
- Env Var: RCLONE_FILEN_PRIVATE_KEY
- Type: string
- Required: false

#### --filen-public-key

Public RSA Key (internal use only)

Properties:

- Config: public_key
- Env Var: RCLONE_FILEN_PUBLIC_KEY
- Type: string
- Required: false

#### --filen-auth-version

Authentication Version (internal use only)

Properties:

- Config: auth_version
- Env Var: RCLONE_FILEN_AUTH_VERSION
- Type: string
- Required: false

#### --filen-base-folder-uuid

UUID of Account Root Directory (internal use only)

Properties:

- Config: base_folder_uuid
- Env Var: RCLONE_FILEN_BASE_FOLDER_UUID
- Type: string
- Required: false

#### --filen-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_FILEN_DESCRIPTION
- Type: string
- Required: false

<!-- autogenerated options stop -->
@@ -1138,10 +1138,6 @@ Backend-only flags (these can be set in the config file also).
       --union-min-free-space SizeSuffix   Minimum viable free space for lfs/eplfs policies (default 1Gi)
       --union-search-policy string        Policy to choose upstream on SEARCH category (default "ff")
       --union-upstreams string            List of space separated upstreams
-      --uptobox-access-token string       Your access token
-      --uptobox-description string        Description of the remote
-      --uptobox-encoding Encoding         The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
-      --uptobox-private                   Set to make uploaded files private
       --webdav-auth-redirect              Preserve authentication on redirect
       --webdav-bearer-token string        Bearer token instead of user/pass (e.g. a Macaroon)
       --webdav-bearer-token-command string   Command to run to get a bearer token
@@ -498,6 +498,12 @@ URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.

Supports the format http://user:pass@host:port, http://host:port, http://host.

Example:

    http://myUser:myPass@proxyhostname.example.com:8000

Properties:
@@ -23,9 +23,11 @@ Here is an overview of the major features of each cloud storage system.
 | Box | SHA1 | R/W | Yes | No | - | - |
 | Citrix ShareFile | MD5 | R/W | Yes | No | - | - |
 | Cloudinary | MD5 | R | No | Yes | - | - |
+| Drime | - | - | No | No | R/W | - |
 | Dropbox | DBHASH ¹ | R | Yes | No | - | - |
 | Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
 | FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - |
+| Filen | Blake3 | R/W | Yes | No | R/W | - |
 | Files.com | MD5, CRC32 | DR/W | Yes | No | R | - |
 | FTP | - | R/W ¹⁰ | No | No | - | - |
 | Gofile | MD5 | DR/W | No | Yes | R | - |
@@ -65,7 +67,6 @@ Here is an overview of the major features of each cloud storage system.
 | SugarSync | - | - | No | No | - | - |
 | Storj | - | R | No | No | - | - |
 | Uloz.to | MD5, SHA256 ¹³ | - | No | Yes | - | - |
-| Uptobox | - | - | No | Yes | - | - |
 | WebDAV | MD5, SHA1 ³ | R ⁴ | Depends | No | - | - |
 | Yandex Disk | MD5 | R/W | No | No | R | - |
 | Zoho WorkDrive | - | - | No | No | - | - |
@@ -515,9 +516,11 @@ upon backend-specific capabilities.
 | Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
 | Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
 | Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
+| Drime | Yes | Yes | Yes | Yes | No | No | Yes | Yes | No | No | Yes |
 | Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
 | Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No |
 | Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
+| Filen | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes |
 | Files.com | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
 | FTP | No | No | Yes | Yes | No | No | Yes | No | No | No | Yes |
 | Gofile | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
@@ -556,7 +559,6 @@ upon backend-specific capabilities.
 | SugarSync | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes |
 | Storj | Yes ² | Yes | Yes | No | No | Yes | Yes | No | Yes | No | No |
 | Uloz.to | No | No | Yes | Yes | No | No | No | No | No | No | Yes |
-| Uptobox | No | Yes | Yes | Yes | No | No | No | No | No | No | No |
 | WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ³ | No | No | Yes | Yes |
 | Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
 | Zoho WorkDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
@@ -18,6 +18,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="China Mobile Ecloud Elastic Object Storage (EOS)" home="https://ecloud.10086.cn/home/product-introduction/eos/" config="/s3/#china-mobile-ecloud-eos" >}}
{{< provider name="Cloudflare R2" home="https://blog.cloudflare.com/r2-open-beta/" config="/s3/#cloudflare-r2" >}}
{{< provider name="Arvan Cloud Object Storage (AOS)" home="https://www.arvancloud.com/en/products/cloud-storage" config="/s3/#arvan-cloud" >}}
{{< provider name="Bizfly Cloud Simple Storage" home="https://bizflycloud.vn/" config="/s3/#bizflycloud" >}}
{{< provider name="Cubbit DS3" home="https://cubbit.io/ds3-cloud" config="/s3/#Cubbit" >}}
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
@@ -4536,6 +4537,36 @@ server_side_encryption =
storage_class =
```

### BizflyCloud {#bizflycloud}

[Bizfly Cloud Simple Storage](https://bizflycloud.vn/simple-storage) is an
S3-compatible service with regions in Hanoi (HN) and Ho Chi Minh City (HCM).

Use the endpoint for your region:

- HN: `hn.ss.bfcplatform.vn`
- HCM: `hcm.ss.bfcplatform.vn`

A minimal configuration looks like this.

```ini
[bizfly]
type = s3
provider = BizflyCloud
env_auth = false
access_key_id = YOUR_ACCESS_KEY
secret_access_key = YOUR_SECRET_KEY
region = HN
endpoint = hn.ss.bfcplatform.vn
location_constraint =
acl =
server_side_encryption =
storage_class =
```

Switch `region` and `endpoint` to `HCM` and `hcm.ss.bfcplatform.vn` for Ho Chi
Minh City.
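Once the remote is defined, ordinary rclone commands should work against it; a
quick smoke test, assuming the `[bizfly]` remote configured above:

```console
# List buckets in the Hanoi region
rclone lsd bizfly:

# Copy a local directory into a bucket
rclone copy /home/source bizfly:my-bucket/backup
```
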
### Ceph

[Ceph](https://ceph.com/) is an open-source, unified, distributed
@@ -1186,6 +1186,12 @@ URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.

Supports the format http://user:pass@host:port, http://host:port, http://host.

Example:

http://myUser:myPass@proxyhostname.example.com:8000

Properties:
@@ -97,7 +97,7 @@ Shade uses multipart uploads by default. This means that files will be chunked a
Please note that when deleting files in Shade via rclone it will delete the file instantly, instead of sending it to the trash. This means that it will not be recoverable.

{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/box/box.go then run make backenddocs" >}}
<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/shade/shade.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

Here are the Standard options specific to shade (Shade FS).
@@ -183,7 +183,7 @@ Properties:
- Type: string
- Required: false

{{< rem autogenerated options stop >}}
<!-- autogenerated options stop -->

## Limitations
@@ -1,179 +0,0 @@
---
title: "Uptobox"
description: "Rclone docs for Uptobox"
versionIntroduced: "v1.56"
---

# {{< icon "fa fa-archive" >}} Uptobox

This is a Backend for Uptobox file storage service. Uptobox is closer to a
one-click hoster than a traditional cloud storage provider and therefore not
suitable for long term storage.

Paths are specified as `remote:path`

Paths may be as deep as required, e.g. `remote:directory/subdirectory`.

## Configuration

To configure an Uptobox backend you'll need your personal api token. You'll find
it in your [account settings](https://uptobox.com/my_account).

Here is an example of how to make a remote called `remote` with the default setup.
First run:

```console
rclone config
```

This will guide you through an interactive setup process:

```text
Current remotes:

Name Type
==== ====
TestUptobox uptobox

e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> n
name> uptobox
Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
[...]
37 / Uptobox
\ "uptobox"
[...]
Storage> uptobox
** See help for uptobox backend at: https://rclone.org/uptobox/ **

Your API Key, get it from https://uptobox.com/my_account
Enter a string value. Press Enter for the default ("").
api_key> xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Remote config
--------------------
[uptobox]
type = uptobox
api_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d>
```

Once configured you can then use `rclone` like this (replace `remote` with the
name you gave your remote):

List directories in top level of your Uptobox

```console
rclone lsd remote:
```

List all the files in your Uptobox

```console
rclone ls remote:
```

To copy a local directory to an Uptobox directory called backup

```console
rclone copy /home/source remote:backup
```

### Modification times and hashes

Uptobox supports neither modified times nor checksums. All timestamps
will read as that set by `--default-time`.

### Restricted filename characters

In addition to the [default restricted characters set](/overview/#restricted-characters)
the following characters are also replaced:

| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| " | 0x22 | " |
| ` | 0x41 | ` |

Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in XML strings.

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/uptobox/uptobox.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

Here are the Standard options specific to uptobox (Uptobox).

#### --uptobox-access-token

Your access token.

Get it from https://uptobox.com/my_account.

Properties:

- Config: access_token
- Env Var: RCLONE_UPTOBOX_ACCESS_TOKEN
- Type: string
- Required: false

### Advanced options

Here are the Advanced options specific to uptobox (Uptobox).

#### --uptobox-private

Set to make uploaded files private

Properties:

- Config: private
- Env Var: RCLONE_UPTOBOX_PRIVATE
- Type: bool
- Default: false

#### --uptobox-encoding

The encoding for the backend.

See the [encoding section in the overview](/overview/#encoding) for more info.

Properties:

- Config: encoding
- Env Var: RCLONE_UPTOBOX_ENCODING
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot

#### --uptobox-description

Description of the remote.

Properties:

- Config: description
- Env Var: RCLONE_UPTOBOX_DESCRIPTION
- Type: string
- Required: false

<!-- autogenerated options stop -->

## Limitations

Uptobox will delete inactive files that have not been accessed in 60 days.

`rclone about` is not supported by this backend an overview of used space can however
been seen in the uptobox web interface.
@@ -10,27 +10,21 @@
{{end}}

<div class="card">
<div class="card-header">
Platinum Sponsor
</div>
<div class="card-header">Platinum Sponsor</div>
<div class="card-body">
<a href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img src="/img/logos/rabata.svg"></a><br />
</div>
</div>

<div class="card">
<div class="card-header">
Gold Sponsor
</div>
<div class="card-header">Gold Sponsor</div>
<div class="card-body">
<a href="https://www.idrive.com/e2/?refer=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor IDrive e2"><img src="/img/logos/idrive_e2.svg" viewBox="0 0 60 55"></a><br />
</div>
</div>

<div class="card">
<div class="card-header">
Gold Sponsor
</div>
<div class="card-header">Gold Sponsor</div>
<div class="card-body">
<a href="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone" target="_blank" rel="noopener" title="Start Your Free Trial Today"><img style="max-width: 100%; height: auto;" src="/img/logos/filescom-enterprise-grade-workflows.png"></a><br />
</div>
@@ -38,25 +32,19 @@

{{if .IsHome}}
<div class="card">
<div class="card-header">
Silver Sponsor
</div>
<div class="card-header">Silver Sponsor</div>
<div class="card-body">
<a href="https://rcloneview.com/?utm_source=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor RcloneView"><img src="/img/logos/rcloneview-banner.svg"></a><br />
<a href="https://rcloneview.com/?utm_source=rclone" target="_blank" rel="noopener" title="Visit rclone's sponsor RcloneView"><img src="/img/logos/rcloneview.svg"></a><br />
</div>
</div>
<div class="card">
<div class="card-header">
Silver Sponsor
</div>
<div class="card-header">Silver Sponsor</div>
<div class="card-body">
<a href="https://github.com/rclone-ui/rclone-ui" target="_blank" rel="noopener" title="Visit rclone's sponsor rclone UI"><img src="/img/logos/rcloneui.svg"></a><br />
<a href="https://rcloneui.com" target="_blank" rel="noopener" title="Visit rclone's sponsor rclone UI"><img src="/img/logos/rcloneui.svg"></a><br />
</div>
</div>
<div class="card">
<div class="card-header">
Silver Sponsor
</div>
<div class="card-header">Silver Sponsor</div>
<div class="card-body">
<a href="https://shade.inc/" target="_blank" rel="noopener" title="Visit rclone's sponsor Shade"><img style="max-width: 100%; height: auto;" src="/img/logos/shade.svg"></a><br />
</div>
@@ -66,10 +66,12 @@
<a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square fa-fw"></i> Citrix ShareFile</a>
<a class="dropdown-item" href="/crypt/"><i class="fa fa-lock fa-fw"></i> Crypt (encrypts the others)</a>
<a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a>
<a class="dropdown-item" href="/drime/"><i class="fab fa-cloud fa-fw"></i> Drime</a>
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a>
<a class="dropdown-item" href="/s3/#filelu-s5"><i class="fa fa-folder fa-fw"></i> FileLu S5 (S3-Compatible)</a>
<a class="dropdown-item" href="/filen/"><i class="fa fa-solid fa-f"></i> Filen</a>
<a class="dropdown-item" href="/filescom/"><i class="fa fa-brands fa-files-pinwheel fa-fw"></i> Files.com</a>
<a class="dropdown-item" href="/ftp/"><i class="fa fa-file fa-fw"></i> FTP</a>
<a class="dropdown-item" href="/gofile/"><i class="fa fa-folder fa-fw"></i> Gofile</a>
@@ -112,7 +114,6 @@
<a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
<a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>
<a class="dropdown-item" href="/ulozto/"><i class="fas fa-angle-double-down fa-fw"></i> Uloz.to</a>
<a class="dropdown-item" href="/uptobox/"><i class="fa fa-archive fa-fw"></i> Uptobox</a>
<a class="dropdown-item" href="/union/"><i class="fa fa-link fa-fw"></i> Union (merge backends)</a>
<a class="dropdown-item" href="/webdav/"><i class="fa fa-server fa-fw"></i> WebDAV</a>
<a class="dropdown-item" href="/yandex/"><i class="fa fa-space-shuttle fa-fw"></i> Yandex Disk</a>
@@ -764,7 +764,7 @@ func SetCacheDir(path string) (err error) {
//
// To override the default we therefore set environment variable TMPDIR
// on Unix systems, and both TMP and TEMP on Windows (they are almost exclusively
// aliases for the same path, and programs may refer to to either of them).
// aliases for the same path, and programs may refer to either of them).
// This should make all libraries and forked processes use the same.
func SetTempDir(path string) (err error) {
var tempDir string
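The comment above describes the mechanism rather than showing it; a minimal
sketch of the idea, assuming we only care about the TMPDIR/TMP/TEMP variables
it names (an illustration, not rclone's actual implementation):

```go
package main

import (
	"os"
	"runtime"
)

// setTempEnv points the conventional temp-dir variables at path so that
// libraries and forked processes resolve the same directory.
func setTempEnv(path string) error {
	if runtime.GOOS == "windows" {
		// TMP and TEMP are almost always aliases for the same path.
		if err := os.Setenv("TMP", path); err != nil {
			return err
		}
		return os.Setenv("TEMP", path)
	}
	return os.Setenv("TMPDIR", path)
}

func main() {
	_ = setTempEnv(os.TempDir())
}
```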
@@ -31,7 +31,7 @@ func camelToSnake(in string) string {
//
// Builtin types are expected to be encoding as their natural
// stringificatons as produced by fmt.Sprint except for []string which
// is expected to be encoded a a CSV with empty array encoded as "".
// is expected to be encoded as a CSV with empty array encoded as "".
//
// Any other types are expected to be encoded by their String()
// methods and decoded by their `Set(s string) error` methods.
@@ -93,7 +93,7 @@ func StringToInterface(def any, in string) (newValue any, err error) {
//
// Builtin types are expected to be encoding as their natural
// stringificatons as produced by fmt.Sprint except for []string which
// is expected to be encoded a a CSV with empty array encoded as "".
// is expected to be encoded as a CSV with empty array encoded as "".
//
// Any other types are expected to be encoded by their String()
// methods and decoded by their `Set(s string) error` methods.
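The convention these comments describe is the familiar pflag.Value-style
String()/Set() pair; a minimal sketch using a hypothetical Level type (not an
rclone type) to show the round trip:

```go
package main

import (
	"fmt"
	"strconv"
)

// Level demonstrates the String()/Set() encode/decode convention.
type Level int

// String encodes the value as its natural stringification.
func (l Level) String() string { return strconv.Itoa(int(l)) }

// Set decodes the string form produced by String.
func (l *Level) Set(s string) error {
	n, err := strconv.Atoi(s)
	if err != nil {
		return fmt.Errorf("invalid level %q: %w", s, err)
	}
	*l = Level(n)
	return nil
}

func main() {
	var l Level
	_ = l.Set("3")
	fmt.Println(l.String()) // prints "3"
}
```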
@@ -180,7 +180,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {

// GetPasswordCommand gets the password using the --password-command setting
//
// If the the --password-command flag was not in use it returns "", nil
// If the --password-command flag was not in use it returns "", nil
func GetPasswordCommand(ctx context.Context) (pass string, err error) {
ci := fs.GetConfig(ctx)
if len(ci.PasswordCommand) == 0 {
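For context, the flag this comment refers to lets an external command supply
the config password; a typical invocation might look like this (the `pass`
entry name is illustrative):

```console
rclone --password-command "pass show rclone/config" listremotes
```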
@@ -15,6 +15,7 @@ import (
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/errcount"
"golang.org/x/sync/errgroup"
"golang.org/x/text/unicode/norm"
)
@@ -626,6 +627,7 @@ func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Conte
remotes = make(chan string, checkers)
g, gCtx = errgroup.WithContext(ctx)
)
ec := errcount.New()
for range checkers {
g.Go(func() (err error) {
var entries = make(fs.DirEntries, 1)
@@ -634,7 +636,8 @@ func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Conte
if err == fs.ErrorObjectNotFound {
// Skip files that are not found
} else if err != nil {
return err
fs.Errorf(remote, "--files-from failed to find file: %v", err)
ec.Add(err)
} else {
err = callback(entries)
if err != nil {
@@ -654,7 +657,8 @@ func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Conte
}
}
close(remotes)
return g.Wait()
ec.Add(g.Wait())
return ec.Err("failed to read --files-from files")
}
}
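The change above switches MakeListR from failing on the first missing
--files-from entry to logging each failure and aggregating them into one
error at the end. A minimal sketch of that aggregate-then-report pattern,
assuming errcount's New/Add/Err behave as the diff shows:

```go
package example

import (
	"fmt"

	"github.com/rclone/rclone/lib/errcount"
)

// processAll applies process to every remote, logging failures as it goes
// and returning a single combined error instead of stopping at the first one.
func processAll(remotes []string, process func(string) error) error {
	ec := errcount.New()
	for _, remote := range remotes {
		if err := process(remote); err != nil {
			fmt.Printf("%s: processing failed: %v\n", remote, err)
			ec.Add(err) // remember the error, keep going
		}
	}
	return ec.Err("failed to process remotes") // nil if nothing was added
}
```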
@@ -394,7 +394,7 @@ func TestNewFilterMakeListR(t *testing.T) {
// Now check an error is returned from NewObject
require.NoError(t, f.AddFile("error"))
err = listR(context.Background(), "", listRcallback)
require.EqualError(t, err, assert.AnError.Error())
require.EqualError(t, err, "failed to read --files-from files: assert.AnError general error for testing")

// The checker will exit by the error above
ci := fs.GetConfig(context.Background())
@@ -403,7 +403,7 @@ func TestNewFilterMakeListR(t *testing.T) {
// Now check an error is returned from NewObject
require.NoError(t, f.AddFile("error"))
err = listR(context.Background(), "", listRcallback)
require.EqualError(t, err, assert.AnError.Error())
require.EqualError(t, err, "failed to read --files-from files: assert.AnError general error for testing")
}

func TestNewFilterMinSize(t *testing.T) {
@@ -225,7 +225,7 @@ func fromTypes(set Set) (map[Type]hash.Hash, error) {
// single multiwriter, where one write will update all
// the hashers.
func toMultiWriter(h map[Type]hash.Hash) io.Writer {
// Convert to to slice
// Convert to slice
var w = make([]io.Writer, 0, len(h))
for _, v := range h {
w = append(w, v)
@@ -79,7 +79,7 @@ type Options struct {
File string `config:"log_file"` // Log everything to this file
MaxSize fs.SizeSuffix `config:"log_file_max_size"` // Max size of log file
MaxBackups int `config:"log_file_max_backups"` // Max backups of log file
MaxAge fs.Duration `config:"log_file_max_age"` // Max age of of log file
MaxAge fs.Duration `config:"log_file_max_age"` // Max age of log file
Compress bool `config:"log_file_compress"` // Set to compress log file
Format logFormat `config:"log_format"` // Comma separated list of log format options
UseSyslog bool `config:"syslog"` // Use Syslog for logging
@@ -16,7 +16,7 @@ func startSystemdLog(handler *OutputHandler) bool {
handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid)
handler.setFormatFlags(logFormatNoLevel)
handler.SetOutput(func(level slog.Level, text string) {
_ = journal.Print(slogLevelToSystemdPriority(level), "%-6s: %s\n", level, text)
_ = journal.Print(slogLevelToSystemdPriority(level), "%-6s: %s", level, text)
})
return true
}
@@ -921,6 +921,18 @@ See the [hashsum](/commands/rclone_hashsum/) command for more information on the
})
}

// Parse download, base64 and hashType parameters
func parseHashParameters(in rc.Params) (download bool, base64 bool, ht hash.Type, err error) {
download, _ = in.GetBool("download")
base64, _ = in.GetBool("base64")
hashType, err := in.GetString("hashType")
if err != nil {
return
}
err = ht.Set(hashType)
return
}

// Hashsum a directory
func rcHashsum(ctx context.Context, in rc.Params) (out rc.Params, err error) {
ctx, f, err := rc.GetFsNamedFileOK(ctx, in, "fs")
@@ -928,16 +940,9 @@ func rcHashsum(ctx context.Context, in rc.Params) (out rc.Params, err error) {
return nil, err
}

download, _ := in.GetBool("download")
base64, _ := in.GetBool("base64")
hashType, err := in.GetString("hashType")
download, base64, ht, err := parseHashParameters(in)
if err != nil {
return nil, fmt.Errorf("%s\n%w", hash.HelpString(0), err)
}
var ht hash.Type
err = ht.Set(hashType)
if err != nil {
return nil, fmt.Errorf("%s\n%w", hash.HelpString(0), err)
return out, err
}

hashes := []string{}
@@ -948,3 +953,64 @@ func rcHashsum(ctx context.Context, in rc.Params) (out rc.Params, err error) {
}
return out, err
}

func init() {
rc.Add(rc.Call{
Path: "operations/hashsumfile",
AuthRequired: true,
Fn: rcHashsumFile,
Title: "Produces a hash for a single file.",
Help: `Produces a hash for a single file using the hash named.

This takes the following parameters:

- fs - a remote name string e.g. "drive:"
- remote - a path within that remote e.g. "file.txt"
- hashType - type of hash to be used
- download - check by downloading rather than with hash (boolean)
- base64 - output the hashes in base64 rather than hex (boolean)

If you supply the download flag, it will download the data from the
remote and create the hash on the fly. This can be useful for remotes
that don't support the given hash or if you really want to read all
the data.

Returns:

- hash - hash for the file
- hashType - type of hash used

Example:

$ rclone rc --loopback operations/hashsumfile fs=/ remote=/bin/bash hashType=MD5 download=true base64=true
{
"hashType": "md5",
"hash": "MDMw-fG2YXs7Uz5Nz-H68A=="
}

See the [hashsum](/commands/rclone_hashsum/) command for more information on the above.
`,
})
}

// Hashsum a file
func rcHashsumFile(ctx context.Context, in rc.Params) (out rc.Params, err error) {
f, remote, err := rc.GetFsAndRemote(ctx, in)
if err != nil {
return nil, err
}
download, base64, ht, err := parseHashParameters(in)
if err != nil {
return out, err
}
o, err := f.NewObject(ctx, remote)
if err != nil {
return nil, err
}
sum, err := HashSum(ctx, ht, base64, download, o)
out = rc.Params{
"hashType": ht.String(),
"hash": sum,
}
return out, err
}
@@ -561,7 +561,7 @@ func TestUploadFile(t *testing.T) {
assert.NoError(t, currentFile.Close())
}()

formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName)
formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName, "application/octet-stream")
require.NoError(t, err)

httpReq := httptest.NewRequest("POST", "/", formReader)
@@ -587,7 +587,7 @@ func TestUploadFile(t *testing.T) {
assert.NoError(t, currentFile2.Close())
}()

formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName)
formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName, "application/octet-stream")
require.NoError(t, err)

httpReq = httptest.NewRequest("POST", "/", formReader)
@@ -840,7 +840,7 @@ func TestRcHashsum(t *testing.T) {
}

// operations/hashsum: hashsum a single file
func TestRcHashsumFile(t *testing.T) {
func TestRcHashsumSingleFile(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/hashsum")
r.Mkdir(ctx, r.Fremote)
@@ -866,3 +866,27 @@ func TestRcHashsumFile(t *testing.T) {
assert.Equal(t, "md5", out["hashType"])
assert.Equal(t, []string{"0ef726ce9b1a7692357ff70dd321d595 hashsum-file1"}, out["hashsum"])
}

// operations/hashsumfile: hashsum a single file
func TestRcHashsumFile(t *testing.T) {
ctx := context.Background()
r, call := rcNewRun(t, "operations/hashsumfile")
r.Mkdir(ctx, r.Fremote)

file1Contents := "file1 contents"
file1 := r.WriteBoth(ctx, "hashsumfile-file1", file1Contents, t1)
r.CheckLocalItems(t, file1)
r.CheckRemoteItems(t, file1)

in := rc.Params{
"fs": r.FremoteName,
"remote": file1.Path,
"hashType": "MD5",
"download": true,
}

out, err := call.Fn(ctx, in)
require.NoError(t, err)
assert.Equal(t, "md5", out["hashType"])
assert.Equal(t, "0ef726ce9b1a7692357ff70dd321d595", out["hash"])
}
@@ -806,7 +806,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {

// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size
// only, we expect nothing to to be transferred on the second sync.
// only, we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
@@ -843,7 +843,7 @@ func TestSyncSizeOnly(t *testing.T) {
}

// Create a file and sync it. Keep the last modified date but change
// the size. With --ignore-size we expect nothing to to be
// the size. With --ignore-size we expect nothing to be
// transferred on the second sync.
func TestSyncIgnoreSize(t *testing.T) {
ctx := context.Background()
@@ -328,7 +328,7 @@ type Flagger interface {
// satisfy as non-pointers
//
// These are from pflag.Value and need to be tested against
// non-pointer value due the the way the backend flags are inserted
// non-pointer value due to the way the backend flags are inserted
// into the flags.
type FlaggerNP interface {
String() string
@@ -1273,10 +1273,14 @@ func Run(t *testing.T, opt *Opt) {
assert.Equal(t, file2Copy.Path, dst.Remote())

// check that mutating dst does not mutate src
err = dst.SetModTime(ctx, fstest.Time("2004-03-03T04:05:06.499999999Z"))
if err != fs.ErrorCantSetModTimeWithoutDelete && err != fs.ErrorCantSetModTime {
assert.NoError(t, err)
assert.False(t, src.ModTime(ctx).Equal(dst.ModTime(ctx)), "mutating dst should not mutate src -- is it Copying by pointer?")
if !strings.Contains(fs.ConfigStringFull(f), "copy_is_hardlink") {
err = dst.SetModTime(ctx, fstest.Time("2004-03-03T04:05:06.499999999Z"))
if err != fs.ErrorCantSetModTimeWithoutDelete && err != fs.ErrorCantSetModTime {
assert.NoError(t, err)
// Re-read the source and check its modtime
src = fstest.NewObject(ctx, t, f, src.Remote())
assert.False(t, src.ModTime(ctx).Equal(dst.ModTime(ctx)), "mutating dst should not mutate src -- is it Copying by pointer?")
}
}

// Delete copy
@@ -164,6 +164,9 @@ backends:
- backend: "gofile"
remote: "TestGoFile:"
fastlist: true
- backend: "filen"
remote: "TestFilen:"
fastlist: false
- backend: "filescom"
remote: "TestFilesCom:"
fastlist: false
@@ -624,11 +627,6 @@ backends:
- TestSyncUTFNorm
ignoretests:
- cmd/gitannex
# - backend: "uptobox"
# remote: "TestUptobox:"
# fastlist: false
# ignore:
# - TestRWFileHandleWriteNoWrite
- backend: "oracleobjectstorage"
remote: "TestOracleObjectStorage:"
fastlist: true
@@ -677,3 +675,9 @@ backends:
# with the parent backend having a different precision.
- TestServerSideCopyOverSelf
- TestServerSideMoveOverSelf
- backend: "drime"
remote: "TestDrime:"
ignoretests:
# The TestBisyncRemoteLocal/check_access_filters tests fail due to duplicated objects
- cmd/bisync
fastlist: false
go.mod (2 changes)
@@ -11,6 +11,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.3
github.com/Azure/go-ntlmssp v0.0.2-0.20251110135918-10b7b7e7cd26
github.com/FilenCloudDienste/filen-sdk-go v0.0.35
github.com/Files-com/files-sdk-go/v3 v3.2.264
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd
github.com/a1ex3/zstd-seekable-format-go/pkg v0.10.0
@@ -154,6 +155,7 @@ require (
github.com/creasty/defaults v1.8.0 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dromara/dongle v1.0.1 // indirect
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.9.1 // indirect
go.sum (10 changes)
@@ -61,6 +61,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgv
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/FilenCloudDienste/filen-sdk-go v0.0.35 h1:geuYpD/1ZXSp1H3kdW7si+KRUIrHHqM1kk8lqoA8Y9M=
github.com/FilenCloudDienste/filen-sdk-go v0.0.35/go.mod h1:0cBhKXQg49XbKZZfk5TCDa3sVLP+xMxZTWL+7KY0XR0=
github.com/Files-com/files-sdk-go/v3 v3.2.264 h1:lMHTplAYI9FtmCo/QOcpRxmPA5REVAct1r2riQmDQKw=
github.com/Files-com/files-sdk-go/v3 v3.2.264/go.mod h1:wGqkOzRu/ClJibvDgcfuJNAqI2nLhe8g91tPlDKRCdE=
github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk=
@@ -232,6 +234,8 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00 h1:xJBhC00smQpSZw3Kr0ErMUBXhUSjYoLRm2szxdbRBL0=
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00/go.mod h1:nNICngOdmNImBb/vuL+dSc0aIg3ryNATpjxThNoPw4g=
github.com/dromara/dongle v1.0.1 h1:si/7UP/EXxnFVZok1cNos70GiMGxInAYMilHQFP5dJs=
github.com/dromara/dongle v1.0.1/go.mod h1:ebFhTaDgxaDIKppycENTWlBsxz8mWCPWOLnsEgDpMv4=
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXIiWJETBpRgERYTGlmMd7HU=
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5/go.mod h1:rSS3kM9XMzSQ6pw91Qgd6yB5jdt70N4OdtrAf74As5M=
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 h1:2tV76y6Q9BB+NEBasnqvs7e49aEBFI8ejC89PSnWH+4=
@@ -249,6 +253,7 @@ github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7
github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA=
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg=
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff/go.mod h1:HMJKR5wlh/ziNp+sHEDV2ltblO4JD2+IdDOWtGcQBTM=
github.com/emmansun/gmsm v0.15.5/go.mod h1:2m4jygryohSWkaSduFErgCwQKab5BNjURoFrn2DNwyU=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -748,6 +753,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
@@ -828,6 +834,7 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
@@ -904,6 +911,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -916,6 +924,7 @@ golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
@@ -932,6 +941,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
@@ -361,9 +361,6 @@ func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, e
} else if dc.rootID == dc.trueRootID {
return "", errors.New("is root directory")
}
if dc.rootParentID == "" {
return "", errors.New("internal error: didn't find rootParentID")
}
return dc.rootParentID, nil
}
@@ -46,7 +46,7 @@ type Pool struct {
}

// totalMemory is a semaphore used to control total buffer usage of
// all Pools. It it may be nil in which case the total buffer usage
// all Pools. It may be nil in which case the total buffer usage
// will not be controlled. It counts memory in active use, it does not
// count memory cached in the pool.
var totalMemory *semaphore.Weighted
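For readers unfamiliar with the pattern, a weighted semaphore of this kind
caps how much memory concurrent users may hold at once; a minimal sketch
using golang.org/x/sync/semaphore (the sizes are illustrative, not rclone's):

```go
package main

import (
	"context"

	"golang.org/x/sync/semaphore"
)

func main() {
	const totalBytes = 64 << 20 // cap active buffer memory at 64 MiB
	sem := semaphore.NewWeighted(totalBytes)

	ctx := context.Background()
	bufSize := int64(1 << 20) // a 1 MiB buffer

	// Block until the buffer's size fits under the cap...
	if err := sem.Acquire(ctx, bufSize); err != nil {
		return
	}
	buf := make([]byte, bufSize)
	_ = buf // ... use the buffer ...

	// ...and give the budget back when done.
	sem.Release(bufSize)
}
```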
@@ -3,6 +3,7 @@ package proxy
import (
"bufio"
"crypto/tls"
"encoding/base64"
"fmt"
"net"
"net/http"
@@ -55,7 +56,13 @@ func HTTPConnectDial(network, addr string, proxyURL *url.URL, proxyDialer proxy.
}

// send CONNECT
_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
user := proxyURL.User
if user != nil {
credential := base64.StdEncoding.EncodeToString([]byte(user.String()))
_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\nProxy-Authorization: Basic %s\r\n\r\n", addr, addr, credential)
} else {
_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
}
if err != nil {
_ = conn.Close()
return nil, fmt.Errorf("HTTP CONNECT proxy failed to send CONNECT: %q", err)
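The new branch adds Basic credentials to the CONNECT request when the proxy
URL carries userinfo. A standalone sketch of the same handshake, with
hypothetical host names and error handling trimmed to the essentials:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net"
	"net/url"
)

func main() {
	proxyURL, _ := url.Parse("http://myUser:myPass@proxy.example.com:8000")
	target := "origin.example.com:443"

	conn, err := net.Dial("tcp", proxyURL.Host)
	if err != nil {
		return
	}
	defer func() { _ = conn.Close() }()

	if user := proxyURL.User; user != nil {
		// Base64 of "user:pass", exactly as the diff above encodes it.
		credential := base64.StdEncoding.EncodeToString([]byte(user.String()))
		fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\nProxy-Authorization: Basic %s\r\n\r\n", target, target, credential)
	} else {
		fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", target, target)
	}
	// A real client would now read and check the proxy's HTTP response
	// before using conn as a tunnel to the target.
}
```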
@@ -14,7 +14,9 @@ import (
"maps"
"mime/multipart"
"net/http"
"net/textproto"
"net/url"
"strings"
"sync"

"github.com/rclone/rclone/fs"
@@ -145,6 +147,7 @@ type Opts struct {
MultipartMetadataName string // ..this is used for the name of the metadata form part if set
MultipartContentName string // ..name of the parameter which is the attached file
MultipartFileName string // ..name of the file for the attached file
MultipartContentType string // ..content type of the attached file
Parameters url.Values // any parameters for the final URL
TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding
Trailer *http.Header // set the request trailer
@@ -371,6 +374,32 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
return resp, nil
}

var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

func escapeQuotes(s string) string {
return quoteEscaper.Replace(s)
}

// multipartFileContentDisposition returns the value of a Content-Disposition header
// with the provided field name and file name.
func multipartFileContentDisposition(fieldname, filename string) string {
return fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
escapeQuotes(fieldname), escapeQuotes(filename))
}

// CreateFormFile is a convenience wrapper around [Writer.CreatePart]. It creates
// a new form-data header with the provided field name and file name.
func CreateFormFile(w *multipart.Writer, fieldname, filename, contentType string) (io.Writer, error) {
h := make(textproto.MIMEHeader)
// FIXME when go1.24 is no longer supported, change to
// multipart.FileContentDisposition and remove definition above
h.Set("Content-Disposition", multipartFileContentDisposition(fieldname, filename))
if contentType != "" {
h.Set("Content-Type", contentType)
}
return w.CreatePart(h)
}

// MultipartUpload creates an io.Reader which produces an encoded a
// multipart form upload from the params passed in and the passed in
//
@@ -382,10 +411,10 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
// the int64 returned is the overhead in addition to the file contents, in case Content-Length is required
//
// NB This doesn't allow setting the content type of the attachment
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) {
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string, contentType string) (io.ReadCloser, string, int64, error) {
bodyReader, bodyWriter := io.Pipe()
writer := multipart.NewWriter(bodyWriter)
contentType := writer.FormDataContentType()
formContentType := writer.FormDataContentType()

// Create a Multipart Writer as base for calculating the Content-Length
buf := &bytes.Buffer{}
@@ -404,7 +433,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
}
}
if in != nil {
_, err = dummyMultipartWriter.CreateFormFile(contentName, fileName)
_, err = CreateFormFile(dummyMultipartWriter, contentName, fileName, contentType)
if err != nil {
return nil, "", 0, err
}
@@ -445,7 +474,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
}

if in != nil {
part, err := writer.CreateFormFile(contentName, fileName)
part, err := CreateFormFile(writer, contentName, fileName, contentType)
if err != nil {
_ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
return
@@ -467,7 +496,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
_ = bodyWriter.Close()
}()

return bodyReader, contentType, multipartLength, nil
return bodyReader, formContentType, multipartLength, nil
}

// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
@@ -539,7 +568,7 @@ func (api *Client) callCodec(ctx context.Context, opts *Opts, request any, respo
opts = opts.Copy()

var overhead int64
opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName, opts.MultipartContentType)
if err != nil {
return nil, err
}
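Taken together, these hunks let callers attach an explicit Content-Type to
the uploaded file part. A minimal sketch of the new CreateFormFile helper in
use, assuming the signature shown in the diff above:

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"

	"github.com/rclone/rclone/lib/rest"
)

func main() {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)

	// Create the file part with an explicit Content-Type, rather than the
	// application/octet-stream default of multipart.Writer.CreateFormFile.
	part, err := rest.CreateFormFile(w, "file", "hello.txt", "text/plain")
	if err != nil {
		return
	}
	_, _ = part.Write([]byte("hello"))
	_ = w.Close()

	fmt.Println(w.FormDataContentType()) // multipart/form-data; boundary=...
}
```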
@@ -254,7 +254,7 @@ func (wb *WriteBack) SetID(pid *Handle) {
//
// Use SetID to create Handles in advance of calling Add.
//
// If modified is false then it it doesn't cancel a pending upload if
// If modified is false then it doesn't cancel a pending upload if
// there is one as there is no need.
func (wb *WriteBack) Add(id Handle, name string, size int64, modified bool, putFn PutFn) Handle {
wb.mu.Lock()