mirror of https://github.com/rclone/rclone.git synced 2025-12-19 09:43:14 +00:00

Compare commits


13 Commits

Author SHA1 Message Date
Nick Craig-Wood
b141a553be fshttp: add --dump curl for dumping HTTP requests as curl commands 2025-12-18 17:53:24 +00:00
Nick Craig-Wood
f81cd7d279 serve s3: make errors in --s3-auth-key fatal - fixes #9044
Previously if auth keys were provided without a comma then rclone
would only log an INFO message which could mean it went on to serve
without any auth.

The parsing for environment variables was changed in v1.70.0 to make
them work properly with multiple inputs. This means the input is
treated like a mini CSV file which works well except in this case when
the input has commas. This meant `user,auth` without quotes was treated
as two key pairs `user` and `auth`. The correct syntax is
`"user,auth"`. This updates the documentation accordingly.
2025-12-18 10:17:41 +00:00
Nick Craig-Wood
1a0a4628d7 Add masrlinu to contributors 2025-12-18 10:17:41 +00:00
masrlinu
c10a4d465c pcloud: add support for real-time updates in mount
Co-authored-by: masrlinu <5259918+masrlinu@users.noreply.github.com>
2025-12-17 15:13:25 +00:00
Nick Craig-Wood
3a6e07a613 memory: add --memory-discard flag for speed testing - fixes #9037 2025-12-17 10:21:12 +00:00
Nick Craig-Wood
c36f99d343 Add vyv03354 to contributors 2025-12-17 10:21:12 +00:00
jhasse-shade
3e21a7261b shade: Fix VFS test issues 2025-12-16 17:21:22 +00:00
vyv03354
fd439fab62 docs: mention use of ListR feature in ls docs 2025-12-15 09:11:00 +01:00
dependabot[bot]
976aa6b416 build: bump actions/download-artifact from 6 to 7
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 6 to 7.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v6...v7)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: '7'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-13 11:01:27 +01:00
dependabot[bot]
b3a0383ca3 build: bump actions/upload-artifact from 5 to 6
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 5 to 6.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-13 11:00:59 +01:00
dependabot[bot]
c13f129339 build: bump actions/cache from 4 to 5
Bumps [actions/cache](https://github.com/actions/cache) from 4 to 5.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](https://github.com/actions/cache/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-12 14:52:57 +01:00
vyv03354
748d8c8957 docs: reflects the fact that pCloud supports ListR 2025-12-11 20:32:53 +01:00
jbagwell-akamai
4d379efcbb S3: Linode: updated endpoints to use ISO 3166-1 alpha-2 standard
Endpoints now use the ISO 3166-1 alpha-2 standard for the country, with the region short name in parentheses instead of separated by another comma
2025-12-11 17:20:34 +00:00
35 changed files with 380 additions and 2125 deletions

View File

@@ -229,7 +229,7 @@ jobs:
cache: false
- name: Cache
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: |
~/go/pkg/mod

View File

@@ -129,7 +129,7 @@ jobs:
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
uses: actions/cache@v5
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
@@ -183,7 +183,7 @@ jobs:
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
@@ -198,7 +198,7 @@ jobs:
steps:
- name: Download Image Digests
uses: actions/download-artifact@v6
uses: actions/download-artifact@v7
with:
path: /tmp/digests
pattern: digests-*

View File

@@ -38,7 +38,6 @@ directories to and from different cloud storage providers.
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Drime [:page_facing_up:](https://rclone.org/s3/#drime)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)

View File

@@ -16,7 +16,6 @@ import (
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drime"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"

View File

@@ -1,231 +0,0 @@
// Package api has type definitions for drime
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api
import (
"encoding/json"
"fmt"
"time"
)
// Types of things in Item
const (
ItemTypeFolder = "folder"
)
type User struct {
Email string `json:"email"`
ID json.Number `json:"id"`
Avatar string `json:"avatar"`
ModelType string `json:"model_type"`
OwnsEntry bool `json:"owns_entry"`
EntryPermissions []any `json:"entry_permissions"`
DisplayName string `json:"display_name"`
}
type Permissions struct {
FilesUpdate bool `json:"files.update"`
FilesCreate bool `json:"files.create"`
FilesDownload bool `json:"files.download"`
FilesDelete bool `json:"files.delete"`
}
// Item describes a folder or a file as returned by /drive/file-entries
type Item struct {
ID json.Number `json:"id"`
Name string `json:"name"`
Description any `json:"description"`
FileName string `json:"file_name"`
Mime string `json:"mime"`
Color any `json:"color"`
Backup bool `json:"backup"`
Tracked int `json:"tracked"`
FileSize int64 `json:"file_size"`
UserID json.Number `json:"user_id"`
ParentID json.Number `json:"parent_id"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt any `json:"deleted_at"`
IsDeleted int `json:"is_deleted"`
Path string `json:"path"`
DiskPrefix any `json:"disk_prefix"`
Type string `json:"type"`
Extension any `json:"extension"`
FileHash any `json:"file_hash"`
Public bool `json:"public"`
Thumbnail bool `json:"thumbnail"`
MuxStatus any `json:"mux_status"`
ThumbnailURL any `json:"thumbnail_url"`
WorkspaceID int `json:"workspace_id"`
IsEncrypted int `json:"is_encrypted"`
Iv any `json:"iv"`
VaultID any `json:"vault_id"`
OwnerID int `json:"owner_id"`
Hash string `json:"hash"`
URL string `json:"url"`
Users []User `json:"users"`
Tags []any `json:"tags"`
Permissions Permissions `json:"permissions"`
}
type Listing struct {
CurrentPage int `json:"current_page"`
Data []Item `json:"data"`
From int `json:"from"`
LastPage int `json:"last_page"`
NextPage int `json:"next_page"`
PerPage int `json:"per_page"`
PrevPage int `json:"prev_page"`
To int `json:"to"`
Total int `json:"total"`
}
type UploadResponse struct {
Status string `json:"status"`
FileEntry Item `json:"fileEntry"`
}
type CreateFolderRequest struct {
Name string `json:"name"`
ParentID json.Number `json:"parentId,omitempty"`
}
type CreateFolderResponse struct {
Status string `json:"status"`
Folder Item `json:"folder"`
}
// Error is returned from drime when things go wrong
type Error struct {
Message string `json:"message"`
}
// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
out := fmt.Sprintf("Error %q", e.Message)
return out
}
// Check Error satisfies the error interface
var _ error = (*Error)(nil)
// DeleteRequest is the input to DELETE /file-entries
type DeleteRequest struct {
EntryIds []string `json:"entryIds"`
DeleteForever bool `json:"deleteForever"`
}
// DeleteResponse is the response from DELETE /file-entries
type DeleteResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Errors map[string]string `json:"errors"`
}
// UpdateItemRequest describes the updates to be done to an item for PUT /file-entries/{id}/
type UpdateItemRequest struct {
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
}
// UpdateItemResponse is returned by PUT /file-entries/{id}/
type UpdateItemResponse struct {
Status string `json:"status"`
FileEntry Item `json:"fileEntry"`
}
// MoveRequest is the input to /file-entries/move
type MoveRequest struct {
EntryIds []string `json:"entryIds"`
DestinationID string `json:"destinationId"`
}
// MoveResponse is returned by POST /file-entries/move
type MoveResponse struct {
Status string `json:"status"`
Entries []Item `json:"entries"`
}
// CopyRequest is the input to /file-entries/duplicate
type CopyRequest struct {
EntryIds []string `json:"entryIds"`
DestinationID string `json:"destinationId"`
}
// CopyResponse is returned by POST /file-entries/duplicate
type CopyResponse struct {
Status string `json:"status"`
Entries []Item `json:"entries"`
}
// MultiPartCreateRequest is the input of POST /s3/multipart/create
type MultiPartCreateRequest struct {
Filename string `json:"filename"`
Mime string `json:"mime"`
Size int64 `json:"size"`
Extension string `json:"extension"`
ParentID json.Number `json:"parent_id"`
RelativePath string `json:"relativePath"`
}
// MultiPartCreateResponse is returned by POST /s3/multipart/create
type MultiPartCreateResponse struct {
UploadID string `json:"uploadId"`
Key string `json:"key"`
}
// CompletedPart is the type for completed parts when making a multipart upload.
type CompletedPart struct {
ETag string `json:"ETag"`
PartNumber int32 `json:"PartNumber"`
}
// MultiPartGetURLsRequest is the input of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsRequest struct {
UploadID string `json:"uploadId"`
Key string `json:"key"`
PartNumbers []int `json:"partNumbers"`
}
// MultiPartGetURLsResponse is the result of POST /s3/multipart/batch-sign-part-urls
type MultiPartGetURLsResponse struct {
URLs []struct {
URL string `json:"url"`
PartNumber int32 `json:"partNumber"`
} `json:"urls"`
}
// MultiPartCompleteRequest is the input to POST /s3/multipart/complete
type MultiPartCompleteRequest struct {
UploadID string `json:"uploadId"`
Key string `json:"key"`
Parts []CompletedPart `json:"parts"`
}
// MultiPartCompleteResponse is the result of POST /s3/multipart/complete
type MultiPartCompleteResponse struct {
Location string `json:"location"`
}
// MultiPartEntriesRequest is the input to POST /s3/entries
type MultiPartEntriesRequest struct {
ClientMime string `json:"clientMime"`
ClientName string `json:"clientName"`
Filename string `json:"filename"`
Size int64 `json:"size"`
ClientExtension string `json:"clientExtension"`
ParentID json.Number `json:"parent_id"`
RelativePath string `json:"relativePath"`
}
// MultiPartEntriesResponse is the result of POST /s3/entries
type MultiPartEntriesResponse struct {
FileEntry Item `json:"fileEntry"`
}
// MultiPartAbort is the input of POST /s3/multipart/abort
type MultiPartAbort struct {
UploadID string `json:"uploadId"`
Key string `json:"key"`
}

File diff suppressed because it is too large

View File

@@ -1,33 +0,0 @@
// Drime filesystem interface
package drime
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDrime:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)

View File

@@ -72,7 +72,7 @@ func (ik *ImageKit) Upload(ctx context.Context, file io.Reader, param UploadPara
response := &UploadResult{}
formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName, "application/octet-stream")
formReader, contentType, _, err := rest.MultipartUpload(ctx, file, formParams, "file", param.FileName)
if err != nil {
return nil, nil, fmt.Errorf("failed to make multipart upload: %w", err)

View File

@@ -6,6 +6,7 @@ import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"path"
@@ -24,7 +25,8 @@ import (
var (
hashType = hash.MD5
// the object storage is persistent
buckets = newBucketsInfo()
buckets = newBucketsInfo()
errWriteOnly = errors.New("can't read when using --memory-discard")
)
// Register with Fs
@@ -33,12 +35,32 @@ func init() {
Name: "memory",
Description: "In memory object storage system.",
NewFs: NewFs,
Options: []fs.Option{},
Options: []fs.Option{{
Name: "discard",
Default: false,
Advanced: true,
Help: `If set all writes will be discarded and reads will return an error
If set then when files are uploaded the contents will not be saved. The
files will appear to have been uploaded but will give an error on
read. Files will have their MD5 sum calculated on upload, which takes
very little CPU time and allows the transfers to be checked.
This can be useful for testing performance.
It is probably most easily used via the connection string syntax:
:memory,discard:bucket
`,
}},
})
}
// Options defines the configuration for this backend
type Options struct{}
type Options struct {
Discard bool `config:"discard"`
}
// Fs represents a remote memory server
type Fs struct {
@@ -164,6 +186,7 @@ type objectData struct {
hash string
mimeType string
data []byte
size int64
}
// Object describes a memory object
@@ -558,7 +581,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hashType {
return "", hash.ErrUnsupported
}
if o.od.hash == "" {
if o.od.hash == "" && !o.fs.opt.Discard {
sum := md5.Sum(o.od.data)
o.od.hash = hex.EncodeToString(sum[:])
}
@@ -567,7 +590,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return int64(len(o.od.data))
return o.od.size
}
// ModTime returns the modification time of the object
@@ -593,6 +616,9 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.Discard {
return nil, errWriteOnly
}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
@@ -624,13 +650,24 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
data, err := io.ReadAll(in)
var data []byte
var size int64
var hash string
if o.fs.opt.Discard {
h := md5.New()
size, err = io.Copy(h, in)
hash = hex.EncodeToString(h.Sum(nil))
} else {
data, err = io.ReadAll(in)
size = int64(len(data))
}
if err != nil {
return fmt.Errorf("failed to update memory object: %w", err)
}
o.od = &objectData{
data: data,
hash: "",
size: size,
hash: hash,
modTime: src.ModTime(ctx),
mimeType: fs.MimeType(ctx, src),
}

View File

@@ -222,3 +222,11 @@ type UserInfo struct {
} `json:"steps"`
} `json:"journey"`
}
// DiffResult is the response from /diff
type DiffResult struct {
Result int `json:"result"`
DiffID int64 `json:"diffid"`
Entries []map[string]any `json:"entries"`
Error string `json:"error"`
}

View File

@@ -171,6 +171,7 @@ type Fs struct {
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
lastDiffID int64 // change tracking state for diff long-polling
}
// Object describes a pcloud object
@@ -1033,6 +1034,137 @@ func (f *Fs) Shutdown(ctx context.Context) error {
return nil
}
// ChangeNotify implements fs.Features.ChangeNotify
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
// Start long-poll loop in background
go f.changeNotifyLoop(ctx, notify, ch)
}
// changeNotifyLoop contains the blocking long-poll logic.
func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
// Standard polling interval
interval := 30 * time.Second
// Start with diffID = 0 to get the current state
var diffID int64
// Helper to process changes from the diff API
handleChanges := func(entries []map[string]any) {
notifiedPaths := make(map[string]bool)
for _, entry := range entries {
meta, ok := entry["metadata"].(map[string]any)
if !ok {
continue
}
// Robust extraction of ParentFolderID
var pid int64
if val, ok := meta["parentfolderid"]; ok {
switch v := val.(type) {
case float64:
pid = int64(v)
case int64:
pid = v
case int:
pid = int64(v)
}
}
// Resolve the path using dirCache.GetInv
// pCloud uses "d" prefix for directory IDs in cache, but API returns numbers
dirID := fmt.Sprintf("d%d", pid)
parentPath, ok := f.dirCache.GetInv(dirID)
if !ok {
// Parent not in cache, so we can ignore this change as it is outside
// of what the mount has seen or cares about.
continue
}
name, _ := meta["name"].(string)
fullPath := path.Join(parentPath, name)
// Determine EntryType (File or Directory)
entryType := fs.EntryObject
if isFolder, ok := meta["isfolder"].(bool); ok && isFolder {
entryType = fs.EntryDirectory
}
// Deduplicate notifications for this batch
if !notifiedPaths[fullPath] {
fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType)
notify(fullPath, entryType)
notifiedPaths[fullPath] = true
}
}
}
for {
// Check context and channel
select {
case <-ctx.Done():
return
case newInterval, ok := <-ch:
if !ok {
return
}
interval = newInterval
default:
}
// Setup /diff Request
opts := rest.Opts{
Method: "GET",
Path: "/diff",
Parameters: url.Values{},
}
if diffID != 0 {
opts.Parameters.Set("diffid", strconv.FormatInt(diffID, 10))
opts.Parameters.Set("block", "1")
} else {
opts.Parameters.Set("last", "0")
}
// Perform Long-Poll
// Timeout set to 90s (server usually blocks for 60s max)
reqCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
var result api.DiffResult
_, err := f.srv.CallJSON(reqCtx, &opts, nil, &result)
cancel()
if err != nil {
if errors.Is(err, context.Canceled) {
return
}
// Ignore timeout errors as they are normal for long-polling
if !errors.Is(err, context.DeadlineExceeded) {
fs.Infof(f, "ChangeNotify: polling error: %v. Waiting %v.", err, interval)
time.Sleep(interval)
}
continue
}
// If result is not 0, reset DiffID to resync
if result.Result != 0 {
diffID = 0
time.Sleep(2 * time.Second)
continue
}
if result.DiffID != 0 {
diffID = result.DiffID
f.lastDiffID = diffID
}
if len(result.Entries) > 0 {
handleChanges(result.Entries)
}
}
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
// EU region supports SHA1 and SHA256 (but rclone doesn't
@@ -1327,7 +1459,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// opts.Body=0), so upload it as a multipart form POST with
// Content-Length set.
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf, opts.ContentType)
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
if err != nil {
return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
}
@@ -1401,6 +1533,7 @@ var (
_ fs.ListPer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
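
For reference, a minimal sketch of driving the new `ChangeNotify` through the
standard `fs.Features` contract (the remote name is hypothetical and must
already be configured):

```go
package main

import (
	"context"
	"fmt"
	"time"

	_ "github.com/rclone/rclone/backend/pcloud" // register the backend
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configfile"
)

func main() {
	ctx := context.Background()
	configfile.Install()               // load the default rclone config file
	f, err := fs.NewFs(ctx, "pcloud:") // hypothetical configured remote
	if err != nil {
		panic(err)
	}
	pollInterval := make(chan time.Duration)
	f.Features().ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
		fmt.Printf("changed: %q (%v)\n", path, entryType)
	}, pollInterval)
	pollInterval <- 30 * time.Second // set the fallback polling interval
	select {}                        // keep receiving notifications
}
```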

View File

@@ -1384,7 +1384,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
for i := range iVal.NumField() {
params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
}
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name, "application/octet-stream")
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
if err != nil {
return fmt.Errorf("failed to make multipart upload: %w", err)
}

View File

@@ -1,26 +1,26 @@
name: Linode
description: Linode Object Storage
endpoint:
nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
fr-par-1.linodeobjects.com: Paris (France), fr-par-1
br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
ap-south-1.linodeobjects.com: Singapore, ap-south-1
sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1
nl-ams-1.linodeobjects.com: Amsterdam, NL (nl-ams-1)
us-southeast-1.linodeobjects.com: Atlanta, GA, US (us-southeast-1)
in-maa-1.linodeobjects.com: Chennai, IN (in-maa-1)
us-ord-1.linodeobjects.com: Chicago, IL, US (us-ord-1)
eu-central-1.linodeobjects.com: Frankfurt, DE (eu-central-1)
id-cgk-1.linodeobjects.com: Jakarta, ID (id-cgk-1)
gb-lon-1.linodeobjects.com: London 2, UK (gb-lon-1)
us-lax-1.linodeobjects.com: Los Angeles, CA, US (us-lax-1)
es-mad-1.linodeobjects.com: Madrid, ES (es-mad-1)
us-mia-1.linodeobjects.com: Miami, FL, US (us-mia-1)
it-mil-1.linodeobjects.com: Milan, IT (it-mil-1)
us-east-1.linodeobjects.com: Newark, NJ, US (us-east-1)
jp-osa-1.linodeobjects.com: Osaka, JP (jp-osa-1)
fr-par-1.linodeobjects.com: Paris, FR (fr-par-1)
br-gru-1.linodeobjects.com: Sao Paulo, BR (br-gru-1)
us-sea-1.linodeobjects.com: Seattle, WA, US (us-sea-1)
ap-south-1.linodeobjects.com: Singapore, SG (ap-south-1)
sg-sin-1.linodeobjects.com: Singapore 2, SG (sg-sin-1)
se-sto-1.linodeobjects.com: Stockholm, SE (se-sto-1)
jp-tyo-1.linodeobjects.com: Tokyo 3, JP (jp-tyo-1)
us-iad-10.linodeobjects.com: Washington, DC, US (us-iad-10)
acl: {}
bucket_acl: true

View File

@@ -688,7 +688,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
"need_idx_progress": {"true"},
"replace": {"1"},
}
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename), "application/octet-stream")
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
if err != nil {
return nil, fmt.Errorf("failed to make multipart upload: %w", err)
}

View File

@@ -311,6 +311,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
o := &Object{
fs: f,
remote: remote,
mtime: srcObj.mtime,
size: srcObj.size,
}
fromFullPath := path.Join(src.Fs().Root(), srcObj.remote)
toFullPath := path.Join(f.root, remote)
@@ -367,7 +369,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return fs.ErrorDirExists
}
err := f.ensureParentDirectories(ctx, dstRemote)
fullPathSrc := f.buildFullPath(srcRemote)
fullPathSrcUnencoded, err := url.QueryUnescape(fullPathSrc)
if err != nil {
return err
}
fullPathDstUnencoded, err := url.QueryUnescape(fullPath)
if err != nil {
return err
}
err = f.ensureParentDirectories(ctx, dstRemote)
if err != nil {
return err
}
@@ -378,6 +391,15 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
}
_, err = f.Move(ctx, o, dstRemote)
if err == nil {
f.createdDirMu.Lock()
f.createdDirs[fullPathSrcUnencoded] = false
f.createdDirs[fullPathDstUnencoded] = true
f.createdDirMu.Unlock()
}
return err
}

View File

@@ -817,7 +817,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
params.Set("filename", url.QueryEscape(name))
params.Set("parent_id", parent)
params.Set("override-name-exist", strconv.FormatBool(true))
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name, "application/octet-stream")
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
if err != nil {
return nil, fmt.Errorf("failed to make multipart upload: %w", err)
}

View File

@@ -43,7 +43,6 @@ docs = [
"compress.md",
"combine.md",
"doi.md",
"drime.md"
"dropbox.md",
"filefabric.md",
"filelu.md",

View File

@@ -26,6 +26,10 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re
The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
use |-R| to make them recurse.
By default, list commands prefer a recursive listing method that uses more
memory but fewer transactions. Use |--disable ListR| to suppress this behavior.
See [|--fast-list|](/docs/#fast-list) for more details.
Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes).`, "|", "`")
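
To illustrate the flags referenced above, a couple of sketch invocations
(remote and path are placeholders):

```console
# ls recurses by default; cap the depth instead
rclone ls --max-depth 1 remote:path

# Use directory-by-directory listing instead of the recursive ListR method
rclone ls --disable ListR remote:path
```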

View File

@@ -13,6 +13,26 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
`--auth-key` is not provided then `serve s3` will allow anonymous
access.
Like all rclone flags `--auth-key` can be set via environment
variables, in this case `RCLONE_AUTH_KEY`. Since this flag can be
repeated, the input to `RCLONE_AUTH_KEY` is CSV encoded. Because
`accessKey,secretKey` contains a comma, it needs to be in quotes.
```console
export RCLONE_AUTH_KEY='"user,pass"'
rclone serve s3 ...
```
Or to supply multiple identities:
```console
export RCLONE_AUTH_KEY='"user1,pass1","user2,pass2"'
rclone serve s3 ...
```
Setting this variable without quotes will produce an error.
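
For contrast, a sketch of the unquoted form that now fails fast instead of
silently serving without auth (credentials are placeholders):

```console
# Wrong: without quotes the comma splits this into two invalid pairs
export RCLONE_AUTH_KEY='user,pass'
rclone serve s3 ...   # fails with an "invalid auth pair" error
```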
Please note that some clients may require HTTPS endpoints. See [the
SSL docs](#tls-ssl) for more information.

View File

@@ -70,6 +70,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
w.s3Secret = getAuthSecret(opt.AuthKey)
}
authList, err := authlistResolver(opt.AuthKey)
if err != nil {
return nil, fmt.Errorf("parsing auth list failed: %q", err)
}
var newLogger logger
w.faker = gofakes3.New(
newBackend(w),
@@ -77,7 +82,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
gofakes3.WithLogger(newLogger),
gofakes3.WithRequestID(rand.Uint64()),
gofakes3.WithoutVersioning(),
gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
gofakes3.WithV4Auth(authList),
gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
)
@@ -92,7 +97,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
w._vfs = vfs.New(f, vfsOpt)
if len(opt.AuthKey) > 0 {
w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
w.faker.AddAuthKeys(authList)
}
}

View File

@@ -3,6 +3,7 @@ package s3
import (
"context"
"encoding/hex"
"errors"
"io"
"os"
"path"
@@ -125,15 +126,14 @@ func rmdirRecursive(p string, VFS *vfs.VFS) {
}
}
func authlistResolver(list []string) map[string]string {
func authlistResolver(list []string) (map[string]string, error) {
authList := make(map[string]string)
for _, v := range list {
parts := strings.Split(v, ",")
if len(parts) != 2 {
fs.Infof(nil, "Ignored: invalid auth pair %s", v)
continue
return nil, errors.New("invalid auth pair: expecting a single comma")
}
authList[parts[0]] = parts[1]
}
return authList
return authList, nil
}
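
A hypothetical test sketching the new contract of `authlistResolver` (not part
of this change; testify is already used by the package's tests):

```go
package s3

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Valid pairs parse into a map; malformed input is now an error, not an INFO log.
func TestAuthlistResolverSketch(t *testing.T) {
	pairs, err := authlistResolver([]string{"user1,pass1", "user2,pass2"})
	require.NoError(t, err)
	assert.Equal(t, map[string]string{"user1": "pass1", "user2": "pass2"}, pairs)

	_, err = authlistResolver([]string{"userpass"}) // no comma
	assert.Error(t, err)
}
```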

View File

@@ -128,7 +128,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Drime" home="https://www.drime.cloud/" config="/drime/" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
{{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}

View File

@@ -1058,3 +1058,5 @@ put them back in again. -->
- Tingsong Xu <tingsong.xu@rightcapital.com>
- Jonas Tingeborn <134889+jojje@users.noreply.github.com>
- jhasse-shade <jacob@shade.inc>
- vyv03354 <VYV03354@nifty.ne.jp>
- masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>

View File

@@ -43,7 +43,6 @@ See the following for detailed instructions for
- [Crypt](/crypt/) - to encrypt other remotes
- [DigitalOcean Spaces](/s3/#digitalocean-spaces)
- [Digi Storage](/koofr/#digi-storage)
- [Drime](/drime/)
- [Dropbox](/dropbox/)
- [Enterprise File Fabric](/filefabric/)
- [FileLu Cloud Storage](/filelu/)
@@ -3279,6 +3278,10 @@ The available flags are:
- `mapper` dumps the JSON blobs being sent to the program supplied with
`--metadata-mapper` and received from it. It can be useful for debugging
the metadata mapper interface.
- `curl` dumps each HTTP request as a `curl` command. It can be used with
the other HTTP debugging flags (e.g. `requests`, `bodies`). By default
the auth will be masked; use it with `auth` to include authentication in
the curl commands too (see the sketch below).
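
A hypothetical invocation combining the new value with existing ones (the
remote name is a placeholder):

```console
# Print each request as a runnable curl command (auth masked by default)
rclone lsf remote: --dump curl

# Combine with other dump values and keep real auth headers in the output
rclone lsf remote: --dump curl,bodies,auth
```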
## Filtering

View File

@@ -1,236 +0,0 @@
---
title: "Drime"
description: "Rclone docs for Drime"
versionIntroduced: "v1.73"
---
# {{< icon "fa fa-cloud" >}} Drime
[Drime](https://drime.cloud/) is a cloud storage and transfer service focused
on fast, resilient file delivery. It offers both free and paid tiers with
emphasis on high-speed uploads and link sharing.
To set up Drime you need to log in, go to Settings > Developer, and create a
token to use as an API access key. Give it a sensible name and copy the token
for use in the config.
## Configuration
Here is a run through of `rclone config` to make a remote called `Drime`.
Firstly run:
```console
rclone config
```
Then follow through the interactive setup:
```text
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
Enter name for new remote.
name> Drime
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
XX / Drime
\ (drime)
Storage> drime
Option access_token.
API Access token
You can get this from the web control panel.
Enter a value. Press Enter to leave empty.
access_token> YOUR_API_ACCESS_TOKEN
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Configuration complete.
Options:
- type: drime
- access_token: YOUR_API_ACCESS_TOKEN
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
Once configured you can then use `rclone` like this (replace `remote` with the
name you gave your remote):
List directories and files in the top level of your Drime
```console
rclone lsf remote:
```
To copy a local directory to a Drime directory called backup
```console
rclone copy /home/source remote:backup
```
### Modification times and hashes
Drime does not support modification times or hashes.
### Restricted filename characters
In addition to the [default restricted characters set](/overview/#restricted-characters)
the following characters are also replaced:
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| \ | 0x5C | ＼ |
File names can also not start or end with the following characters.
These only get replaced if they are the last character in the name:
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
| SP | 0x20 | ␠ |
Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.
### Root folder ID
You can set the `root_folder_id` for rclone. This is the directory
(identified by its `Folder ID`) that rclone considers to be the root
of your Drime drive.
Normally you will leave this blank and rclone will determine the
correct root to use itself and fill in the value in the config file.
However you can set this to restrict rclone to a specific folder
hierarchy.
In order to do this you will have to find the `Folder ID` of the
directory you wish rclone to display.
You can do this with rclone
```console
$ rclone lsf -Fip --dirs-only remote:
d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/
f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/
d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/
```
The ID to use is the part before the `;` so you could set
```text
root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0
```
To restrict rclone to the `Files` directory.
<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/drime/drime.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options
Here are the Standard options specific to drime (Drime).
#### --drime-access-token
API Access token
You can get this from the web control panel.
Properties:
- Config: access_token
- Env Var: RCLONE_DRIME_ACCESS_TOKEN
- Type: string
- Required: false
### Advanced options
Here are the Advanced options specific to drime (Drime).
#### --drime-root-folder-id
ID of the root folder
Leave this blank normally, rclone will fill it in automatically.
If you want rclone to be restricted to a particular folder you can
fill it in - see the docs for more info.
Properties:
- Config: root_folder_id
- Env Var: RCLONE_DRIME_ROOT_FOLDER_ID
- Type: string
- Required: false
#### --drime-workspace-id
Account ID
Leave this blank normally, rclone will fill it in automatically.
Properties:
- Config: workspace_id
- Env Var: RCLONE_DRIME_WORKSPACE_ID
- Type: string
- Required: false
#### --drime-list-chunk
Number of items to list in each call
Properties:
- Config: list_chunk
- Env Var: RCLONE_DRIME_LIST_CHUNK
- Type: int
- Default: 1000
#### --drime-encoding
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Properties:
- Config: encoding
- Env Var: RCLONE_DRIME_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot
#### --drime-description
Description of the remote.
Properties:
- Config: description
- Env Var: RCLONE_DRIME_DESCRIPTION
- Type: string
- Required: false
<!-- autogenerated options stop -->
## Limitations
Drime only supports filenames up to 255 characters in length, where
length is measured in UTF-8 bytes.

View File

@@ -23,7 +23,6 @@ Here is an overview of the major features of each cloud storage system.
| Box | SHA1 | R/W | Yes | No | - | - |
| Citrix ShareFile | MD5 | R/W | Yes | No | - | - |
| Cloudinary | MD5 | R | No | Yes | - | - |
| Drime | - | - | No | No | R/W | - |
| Dropbox | DBHASH ¹ | R | Yes | No | - | - |
| Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
| FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - |
@@ -516,7 +515,6 @@ upon backend-specific capabilities.
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
| Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes |
| Drime | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes |
| Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| Cloudinary | No | No | No | No | No | No | Yes | No | No | No | No |
| Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes |
@@ -543,7 +541,7 @@ upon backend-specific capabilities.
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| pCloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes |

View File

So if the folder you want rclone to use is "My Music/", then use the returned
id from the ```rclone lsf``` command (ex. `dxxxxxxxx2`) as the `root_folder_id` variable
value in the config file.
### Change notifications and mounts
The pCloud backend supports real-time updates for rclone mounts via change
notifications. rclone uses pCloud's diff long-polling API to detect changes and
will automatically refresh directory listings in the mounted filesystem when
changes occur.
Notes and behavior:
- Works automatically when using `rclone mount` and requires no additional
configuration.
- Notifications are directory-scoped: when rclone detects a change, it refreshes
the affected directory so new/removed/renamed files become visible promptly.
- Updates are near real-time. The backend uses a long poll with short fallback
polling intervals, so you should see changes appear quickly without manual
refreshes.
If you want to debug or verify notifications, you can use the helper command:
```bash
rclone test changenotify remote:
```
This will log incoming change notifications for the given remote.
<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/pcloud/pcloud.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

View File

@@ -66,7 +66,6 @@
<a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square fa-fw"></i> Citrix ShareFile</a>
<a class="dropdown-item" href="/crypt/"><i class="fa fa-lock fa-fw"></i> Crypt (encrypts the others)</a>
<a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a>
<a class="dropdown-item" href="/drime/"><i class="fab fa-cloud fa-fw"></i> Drime</a>
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a>

View File

@@ -14,6 +14,7 @@ const (
DumpGoRoutines
DumpOpenFiles
DumpMapper
DumpCurl
)
type dumpChoices struct{}
@@ -29,6 +30,7 @@ func (dumpChoices) Choices() []BitsChoicesInfo {
{uint64(DumpGoRoutines), "goroutines"},
{uint64(DumpOpenFiles), "openfiles"},
{uint64(DumpMapper), "mapper"},
{uint64(DumpCurl), "curl"},
}
}

View File

@@ -15,6 +15,8 @@ import (
"net/http/httputil"
"net/url"
"os"
"slices"
"strings"
"sync"
"time"
@@ -24,6 +26,7 @@ import (
"github.com/rclone/rclone/lib/structs"
"github.com/youmark/pkcs8"
"golang.org/x/net/publicsuffix"
"moul.io/http2curl/v2"
)
const (
@@ -439,6 +442,18 @@ func cleanAuths(buf []byte) []byte {
return buf
}
// cleanCurl gets rid of Auth headers in a curl command
func cleanCurl(cmd *http2curl.CurlCommand) {
for _, authBuf := range authBufs {
auth := "'" + string(authBuf)
for i, arg := range *cmd {
if strings.HasPrefix(arg, auth) {
(*cmd)[i] = auth + "XXXX'"
}
}
}
}
var expireWindow = 30 * time.Second
func isCertificateExpired(cc *tls.Config) bool {
@@ -492,6 +507,26 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
fs.Debugf(nil, "%s", separatorReq)
logMutex.Unlock()
}
// Dump curl request
if t.dump&(fs.DumpCurl) != 0 {
cmd, err := http2curl.GetCurlCommand(req)
if err != nil {
fs.Debugf(nil, "Failed to create curl command: %v", err)
} else {
// Patch -X HEAD into --head
for i := range len(*cmd) - 1 {
if (*cmd)[i] == "-X" && (*cmd)[i+1] == "'HEAD'" {
(*cmd)[i] = "--head"
*cmd = slices.Delete(*cmd, i+1, i+2)
break
}
}
if t.dump&fs.DumpAuth == 0 {
cleanCurl(cmd)
}
fs.Debugf(nil, "HTTP REQUEST: %v", cmd)
}
}
// Do round trip
resp, err = t.Transport.RoundTrip(req)
// Logf response

View File

@@ -19,6 +19,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"moul.io/http2curl/v2"
)
func TestCleanAuth(t *testing.T) {
@@ -61,6 +62,32 @@ func TestCleanAuths(t *testing.T) {
}
}
func TestCleanCurl(t *testing.T) {
for _, test := range []struct {
in []string
want []string
}{{
[]string{""},
[]string{""},
}, {
[]string{"floo"},
[]string{"floo"},
}, {
[]string{"'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'Authorization: XXXX'", "'Potato: Help'", ""},
}, {
[]string{"'X-Auth-Token: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'X-Auth-Token: XXXX'", "'Potato: Help'", ""},
}, {
[]string{"'X-Auth-Token: AAAAAAAAA'", "'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'X-Auth-Token: XXXX'", "'Authorization: XXXX'", "'Potato: Help'", ""},
}} {
in := http2curl.CurlCommand(test.in)
cleanCurl(&in)
assert.Equal(t, test.want, test.in, test.in)
}
}
var certSerial = int64(0)
// Create a test certificate and key pair that is valid for a specific

View File

@@ -561,7 +561,7 @@ func TestUploadFile(t *testing.T) {
assert.NoError(t, currentFile.Close())
}()
formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName, "application/octet-stream")
formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName)
require.NoError(t, err)
httpReq := httptest.NewRequest("POST", "/", formReader)
@@ -587,7 +587,7 @@ func TestUploadFile(t *testing.T) {
assert.NoError(t, currentFile2.Close())
}()
formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName, "application/octet-stream")
formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile2, url.Values{}, "file", testFileName)
require.NoError(t, err)
httpReq = httptest.NewRequest("POST", "/", formReader)

View File

@@ -677,8 +677,3 @@ backends:
# with the parent backend having a different precision.
- TestServerSideCopyOverSelf
- TestServerSideMoveOverSelf
- backend: "drime"
remote: "TestDrime:"
ignoretests:
- TestBisyncRemoteLocal/check_access_filters
fastlist: false

View File

@@ -361,6 +361,9 @@ func (dc *DirCache) RootParentID(ctx context.Context, create bool) (ID string, e
} else if dc.rootID == dc.trueRootID {
return "", errors.New("is root directory")
}
if dc.rootParentID == "" {
return "", errors.New("internal error: didn't find rootParentID")
}
return dc.rootParentID, nil
}

View File

@@ -14,7 +14,6 @@ import (
"maps"
"mime/multipart"
"net/http"
"net/textproto"
"net/url"
"sync"
@@ -146,7 +145,6 @@ type Opts struct {
MultipartMetadataName string // ..this is used for the name of the metadata form part if set
MultipartContentName string // ..name of the parameter which is the attached file
MultipartFileName string // ..name of the file for the attached file
MultipartContentType string // ..content type of the attached file
Parameters url.Values // any parameters for the final URL
TransferEncoding []string // transfer encoding, set to "identity" to disable chunked encoding
Trailer *http.Header // set the request trailer
@@ -373,17 +371,6 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
return resp, nil
}
// CreateFormFile is a convenience wrapper around [Writer.CreatePart]. It creates
// a new form-data header with the provided field name and file name.
func CreateFormFile(w *multipart.Writer, fieldname, filename, contentType string) (io.Writer, error) {
h := make(textproto.MIMEHeader)
h.Set("Content-Disposition", multipart.FileContentDisposition(fieldname, filename))
if contentType != "" {
h.Set("Content-Type", contentType)
}
return w.CreatePart(h)
}
// MultipartUpload creates an io.Reader which produces an encoded a
// multipart form upload from the params passed in and the passed in
//
@@ -395,10 +382,10 @@ func CreateFormFile(w *multipart.Writer, fieldname, filename, contentType string
// the int64 returned is the overhead in addition to the file contents, in case Content-Length is required
//
// NB This doesn't allow setting the content type of the attachment
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string, contentType string) (io.ReadCloser, string, int64, error) {
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) {
bodyReader, bodyWriter := io.Pipe()
writer := multipart.NewWriter(bodyWriter)
formContentType := writer.FormDataContentType()
contentType := writer.FormDataContentType()
// Create a Multipart Writer as base for calculating the Content-Length
buf := &bytes.Buffer{}
@@ -417,7 +404,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
}
}
if in != nil {
_, err = CreateFormFile(dummyMultipartWriter, contentName, fileName, contentType)
_, err = dummyMultipartWriter.CreateFormFile(contentName, fileName)
if err != nil {
return nil, "", 0, err
}
@@ -458,7 +445,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
}
if in != nil {
part, err := CreateFormFile(writer, contentName, fileName, contentType)
part, err := writer.CreateFormFile(contentName, fileName)
if err != nil {
_ = bodyWriter.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
return
@@ -480,7 +467,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
_ = bodyWriter.Close()
}()
return bodyReader, formContentType, multipartLength, nil
return bodyReader, contentType, multipartLength, nil
}
// CallJSON runs Call and decodes the body as a JSON object into response (if not nil)
@@ -552,7 +539,7 @@ func (api *Client) callCodec(ctx context.Context, opts *Opts, request any, respo
opts = opts.Copy()
var overhead int64
opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName, opts.MultipartContentType)
opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
if err != nil {
return nil, err
}
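
A minimal sketch of the updated `rest.MultipartUpload` call after this change
(field values are placeholders; note the attachment content type can no longer
be set):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/url"
	"os"
	"strings"

	"github.com/rclone/rclone/lib/rest"
)

func main() {
	ctx := context.Background()
	params := url.Values{}
	params.Set("note", "example form field") // hypothetical parameter

	in := strings.NewReader("hello world")
	body, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", "hello.txt")
	if err != nil {
		panic(err)
	}
	defer func() { _ = body.Close() }()
	fmt.Println("Content-Type:", contentType)
	fmt.Println("overhead bytes:", overhead)
	_, _ = io.Copy(os.Stdout, body) // stream the encoded multipart form
}
```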