Mirror of https://github.com/rclone/rclone.git (synced 2025-12-22 11:13:23 +00:00)

Compare commits: fix-9031-b...dump-curl (8 commits)

- b141a553be
- f81cd7d279
- 1a0a4628d7
- c10a4d465c
- 3a6e07a613
- c36f99d343
- 3e21a7261b
- fd439fab62
@@ -133,23 +133,32 @@ type File struct {
 	Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
 }
 
-// AuthorizeAccountResponse is as returned from the b2_authorize_account call
-type AuthorizeAccountResponse struct {
+// StorageAPI is as returned from the b2_authorize_account call
+type StorageAPI struct {
 	AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
-	AccountID string `json:"accountId"` // The identifier for the account.
 	Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
-		BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
-		BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
-		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
+		Buckets []struct { // When present, access is restricted to one or more buckets.
+			ID string `json:"id"` // ID of bucket
+			Name string `json:"name"` // When present, name of bucket - may be empty
+		} `json:"buckets"`
+		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
 		NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
 	} `json:"allowed"`
 	APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
-	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
 	DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
 	MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
 	RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
 }
 
+// AuthorizeAccountResponse is as returned from the b2_authorize_account call
+type AuthorizeAccountResponse struct {
+	AccountID string `json:"accountId"` // The identifier for the account.
+	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
+	APIs struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
+		Storage StorageAPI `json:"storageApi"`
+	} `json:"apiInfo"`
+}
+
 // ListBucketsRequest is parameters for b2_list_buckets call
 type ListBucketsRequest struct {
 	AccountID string `json:"accountId"` // The identifier for the account.
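Not part of the change: a minimal sketch of how the reshaped v4 authorize response decodes with the structs above, with the storage fields now nested under `apiInfo.storageApi`. The JSON values are invented for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copies of the structs in the diff above - just enough to show
// how the v4 response nests the storage fields under apiInfo.storageApi.
type StorageAPI struct {
	APIURL      string `json:"apiUrl"`
	DownloadURL string `json:"downloadUrl"`
	Allowed     struct {
		Buckets []struct {
			ID   string `json:"id"`
			Name string `json:"name"`
		} `json:"buckets"`
		Capabilities []string `json:"capabilities"`
	} `json:"allowed"`
}

type AuthorizeAccountResponse struct {
	AccountID          string `json:"accountId"`
	AuthorizationToken string `json:"authorizationToken"`
	APIs               struct {
		Storage StorageAPI `json:"storageApi"`
	} `json:"apiInfo"`
}

func main() {
	// Invented response body for illustration only.
	body := []byte(`{
	  "accountId": "abc123",
	  "authorizationToken": "4_00xxxx",
	  "apiInfo": {
	    "storageApi": {
	      "apiUrl": "https://api001.backblazeb2.com",
	      "downloadUrl": "https://f001.backblazeb2.com",
	      "allowed": {
	        "capabilities": ["listBuckets", "readFiles"],
	        "buckets": [{"id": "b1", "name": "my-bucket"}]
	      }
	    }
	  }
	}`)
	var info AuthorizeAccountResponse
	if err := json.Unmarshal(body, &info); err != nil {
		panic(err)
	}
	// Fields previously at the top level are now reached via APIs.Storage.
	fmt.Println(info.APIs.Storage.APIURL, info.APIs.Storage.Allowed.Buckets[0].Name)
}
```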
backend/b2/b2.go (131 changed lines)
@@ -607,17 +607,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to authorize account: %w", err)
 	}
-	// If this is a key limited to a single bucket, it must exist already
-	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
-		allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
-		if allowedBucket == "" {
-			return nil, errors.New("bucket that application key is restricted to no longer exists")
+	// If this is a key limited to one or more buckets, one of them must exist
+	// and be ours.
+	if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 {
+		buckets := f.info.APIs.Storage.Allowed.Buckets
+		var rootFound = false
+		var rootID string
+		for _, b := range buckets {
+			allowedBucket := f.opt.Enc.ToStandardName(b.Name)
+			if allowedBucket == "" {
+				fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
+				continue
+			}
+
+			if allowedBucket == f.rootBucket {
+				rootFound = true
+				rootID = b.ID
+			}
 		}
-		if allowedBucket != f.rootBucket {
-			return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
+		if !rootFound {
+			return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets)
 		}
 		f.cache.MarkOK(f.rootBucket)
-		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
+		f.setBucketID(f.rootBucket, rootID)
 	}
 	if f.rootBucket != "" && f.rootDirectory != "" {
 		// Check to see if the (bucket,directory) is actually an existing file
@@ -643,7 +655,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 	defer f.authMu.Unlock()
 	opts := rest.Opts{
 		Method:   "GET",
-		Path:     "/b2api/v1/b2_authorize_account",
+		Path:     "/b2api/v4/b2_authorize_account",
 		RootURL:  f.opt.Endpoint,
 		UserName: f.opt.Account,
 		Password: f.opt.Key,
@@ -656,13 +668,13 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 	if err != nil {
 		return fmt.Errorf("failed to authenticate: %w", err)
 	}
-	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
+	f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
 	return nil
 }
 
 // hasPermission returns if the current AuthorizationToken has the selected permission
 func (f *Fs) hasPermission(permission string) bool {
-	return slices.Contains(f.info.Allowed.Capabilities, permission)
+	return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
 }
 
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -1067,44 +1079,83 @@ type listBucketFn func(*api.Bucket) error
 
 // listBucketsToFn lists the buckets to the function supplied
 func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
-	var account = api.ListBucketsRequest{
-		AccountID: f.info.AccountID,
-		BucketID:  f.info.Allowed.BucketID,
-	}
-	if bucketName != "" && account.BucketID == "" {
-		account.BucketName = f.opt.Enc.FromStandardName(bucketName)
+	responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]
+
+	call := func(id string) error {
+		var account = api.ListBucketsRequest{
+			AccountID: f.info.AccountID,
+			BucketID:  id,
+		}
+		if bucketName != "" && account.BucketID == "" {
+			account.BucketName = f.opt.Enc.FromStandardName(bucketName)
+		}
+
+		var response api.ListBucketsResponse
+		opts := rest.Opts{
+			Method: "POST",
+			Path:   "/b2_list_buckets",
+		}
+		err := f.pacer.Call(func() (bool, error) {
+			resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
+			return f.shouldRetry(ctx, resp, err)
+		})
+		if err != nil {
+			return err
+		}
+		responses = append(responses, response)
+		return nil
 	}
 
-	var response api.ListBucketsResponse
-	opts := rest.Opts{
-		Method: "POST",
-		Path:   "/b2_list_buckets",
+	for i := range f.info.APIs.Storage.Allowed.Buckets {
+		b := &f.info.APIs.Storage.Allowed.Buckets[i]
+		// Empty names indicate a bucket that no longer exists, this is non-fatal
+		// for multi-bucket API keys.
+		if b.Name == "" {
+			continue
+		}
+		// When requesting a specific bucket skip over non-matching names
+		if bucketName != "" && b.Name != bucketName {
+			continue
+		}
+
+		err := call(b.ID)
+		if err != nil {
+			return err
+		}
 	}
-	err := f.pacer.Call(func() (bool, error) {
-		resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
-		return f.shouldRetry(ctx, resp, err)
-	})
-	if err != nil {
-		return err
+
+	if len(f.info.APIs.Storage.Allowed.Buckets) == 0 {
+		err := call("")
+		if err != nil {
+			return err
+		}
 	}
 
 	f.bucketIDMutex.Lock()
 	f.bucketTypeMutex.Lock()
 	f._bucketID = make(map[string]string, 1)
 	f._bucketType = make(map[string]string, 1)
-	for i := range response.Buckets {
-		bucket := &response.Buckets[i]
-		bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
-		f.cache.MarkOK(bucket.Name)
-		f._bucketID[bucket.Name] = bucket.ID
-		f._bucketType[bucket.Name] = bucket.Type
+
+	for ri := range responses {
+		response := &responses[ri]
+		for i := range response.Buckets {
+			bucket := &response.Buckets[i]
+			bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
+			f.cache.MarkOK(bucket.Name)
+			f._bucketID[bucket.Name] = bucket.ID
+			f._bucketType[bucket.Name] = bucket.Type
+		}
 	}
 	f.bucketTypeMutex.Unlock()
 	f.bucketIDMutex.Unlock()
-	for i := range response.Buckets {
-		bucket := &response.Buckets[i]
-		err = fn(bucket)
-		if err != nil {
-			return err
+	for ri := range responses {
+		response := &responses[ri]
+		for i := range response.Buckets {
+			bucket := &response.Buckets[i]
+			err := fn(bucket)
+			if err != nil {
+				return err
+			}
 		}
 	}
 	return nil
@@ -1606,7 +1657,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
 	bucket, bucketPath := f.split(remote)
 	var RootURL string
 	if f.opt.DownloadURL == "" {
-		RootURL = f.info.DownloadURL
+		RootURL = f.info.APIs.Storage.DownloadURL
 	} else {
 		RootURL = f.opt.DownloadURL
 	}
@@ -1957,7 +2008,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption)
 	// Use downloadUrl from backblaze if downloadUrl is not set
 	// otherwise use the custom downloadUrl
 	if o.fs.opt.DownloadURL == "" {
-		opts.RootURL = o.fs.info.DownloadURL
+		opts.RootURL = o.fs.info.APIs.Storage.DownloadURL
 	} else {
 		opts.RootURL = o.fs.opt.DownloadURL
 	}
@@ -6,6 +6,7 @@ import (
 	"context"
 	"crypto/md5"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"io"
 	"path"
@@ -24,7 +25,8 @@ import (
 var (
 	hashType = hash.MD5
 	// the object storage is persistent
 	buckets = newBucketsInfo()
+	errWriteOnly = errors.New("can't read when using --memory-discard")
 )
 
 // Register with Fs
@@ -33,12 +35,32 @@ func init() {
 		Name:        "memory",
 		Description: "In memory object storage system.",
 		NewFs:       NewFs,
-		Options:     []fs.Option{},
+		Options: []fs.Option{{
+			Name:     "discard",
+			Default:  false,
+			Advanced: true,
+			Help: `If set all writes will be discarded and reads will return an error
+
+If set then when files are uploaded the contents will not be saved. The
+files will appear to have been uploaded but will give an error on
+read. Files will have their MD5 sum calculated on upload which takes
+very little CPU time and allows the transfers to be checked.
+
+This can be useful for testing performance.
+
+Probably most easily used by using the connection string syntax:
+
+    :memory,discard:bucket
+`,
+		}},
 	})
 }
 
 // Options defines the configuration for this backend
-type Options struct{}
+type Options struct {
+	Discard bool `config:"discard"`
+}
 
 // Fs represents a remote memory server
 type Fs struct {
@@ -164,6 +186,7 @@ type objectData struct {
 	hash     string
 	mimeType string
 	data     []byte
+	size     int64
 }
 
 // Object describes a memory object
@@ -558,7 +581,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hashType {
 		return "", hash.ErrUnsupported
 	}
-	if o.od.hash == "" {
+	if o.od.hash == "" && !o.fs.opt.Discard {
 		sum := md5.Sum(o.od.data)
 		o.od.hash = hex.EncodeToString(sum[:])
 	}
@@ -567,7 +590,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 
 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	return int64(len(o.od.data))
+	return o.od.size
 }
 
 // ModTime returns the modification time of the object
@@ -593,6 +616,9 @@ func (o *Object) Storable() bool {
 
 // Open an object for read
 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	if o.fs.opt.Discard {
+		return nil, errWriteOnly
+	}
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -624,13 +650,24 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // The new object may have been created if an error is returned
 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	bucket, bucketPath := o.split()
-	data, err := io.ReadAll(in)
+	var data []byte
+	var size int64
+	var hash string
+	if o.fs.opt.Discard {
+		h := md5.New()
+		size, err = io.Copy(h, in)
+		hash = hex.EncodeToString(h.Sum(nil))
+	} else {
+		data, err = io.ReadAll(in)
+		size = int64(len(data))
+	}
 	if err != nil {
 		return fmt.Errorf("failed to update memory object: %w", err)
 	}
 	o.od = &objectData{
 		data:     data,
-		hash:     "",
+		size:     size,
+		hash:     hash,
 		modTime:  src.ModTime(ctx),
 		mimeType: fs.MimeType(ctx, src),
 	}
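As a concrete (hypothetical) use of the new discard option, a throughput test using the connection string syntax from the help text above - the local path and bucket name are placeholders:

```console
# Uploads are hashed and counted but never stored
rclone copy /path/to/testdata ":memory,discard:bench" -P

# Reads fail by design with "can't read when using --memory-discard"
rclone cat ":memory,discard:bench/file.bin"
```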
@@ -222,3 +222,11 @@ type UserInfo struct {
 		} `json:"steps"`
 	} `json:"journey"`
 }
+
+// DiffResult is the response from /diff
+type DiffResult struct {
+	Result  int              `json:"result"`
+	DiffID  int64            `json:"diffid"`
+	Entries []map[string]any `json:"entries"`
+	Error   string           `json:"error"`
+}
@@ -171,6 +171,7 @@ type Fs struct {
 	dirCache     *dircache.DirCache // Map of directory path to directory id
 	pacer        *fs.Pacer          // pacer for API calls
 	tokenRenewer *oauthutil.Renew   // renew the token on expiry
+	lastDiffID   int64              // change tracking state for diff long-polling
 }
 
 // Object describes a pcloud object
@@ -1033,6 +1034,137 @@ func (f *Fs) Shutdown(ctx context.Context) error {
 	return nil
 }
 
+// ChangeNotify implements fs.Features.ChangeNotify
+func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
+	// Start long-poll loop in background
+	go f.changeNotifyLoop(ctx, notify, ch)
+}
+
+// changeNotifyLoop contains the blocking long-poll logic.
+func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
+	// Standard polling interval
+	interval := 30 * time.Second
+
+	// Start with diffID = 0 to get the current state
+	var diffID int64
+
+	// Helper to process changes from the diff API
+	handleChanges := func(entries []map[string]any) {
+		notifiedPaths := make(map[string]bool)
+
+		for _, entry := range entries {
+			meta, ok := entry["metadata"].(map[string]any)
+			if !ok {
+				continue
+			}
+
+			// Robust extraction of ParentFolderID
+			var pid int64
+			if val, ok := meta["parentfolderid"]; ok {
+				switch v := val.(type) {
+				case float64:
+					pid = int64(v)
+				case int64:
+					pid = v
+				case int:
+					pid = int64(v)
+				}
+			}
+
+			// Resolve the path using dirCache.GetInv
+			// pCloud uses "d" prefix for directory IDs in cache, but API returns numbers
+			dirID := fmt.Sprintf("d%d", pid)
+			parentPath, ok := f.dirCache.GetInv(dirID)
+
+			if !ok {
+				// Parent not in cache, so we can ignore this change as it is outside
+				// of what the mount has seen or cares about.
+				continue
+			}
+
+			name, _ := meta["name"].(string)
+			fullPath := path.Join(parentPath, name)
+
+			// Determine EntryType (File or Directory)
+			entryType := fs.EntryObject
+			if isFolder, ok := meta["isfolder"].(bool); ok && isFolder {
+				entryType = fs.EntryDirectory
+			}
+
+			// Deduplicate notifications for this batch
+			if !notifiedPaths[fullPath] {
+				fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType)
+				notify(fullPath, entryType)
+				notifiedPaths[fullPath] = true
+			}
+		}
+	}
+
+	for {
+		// Check context and channel
		select {
+		case <-ctx.Done():
+			return
+		case newInterval, ok := <-ch:
+			if !ok {
+				return
+			}
+			interval = newInterval
+		default:
+		}
+
+		// Setup /diff Request
+		opts := rest.Opts{
+			Method:     "GET",
+			Path:       "/diff",
+			Parameters: url.Values{},
+		}
+
+		if diffID != 0 {
+			opts.Parameters.Set("diffid", strconv.FormatInt(diffID, 10))
+			opts.Parameters.Set("block", "1")
+		} else {
+			opts.Parameters.Set("last", "0")
+		}
+
+		// Perform Long-Poll
+		// Timeout set to 90s (server usually blocks for 60s max)
+		reqCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
+		var result api.DiffResult
+
+		_, err := f.srv.CallJSON(reqCtx, &opts, nil, &result)
+		cancel()
+
+		if err != nil {
+			if errors.Is(err, context.Canceled) {
+				return
+			}
+			// Ignore timeout errors as they are normal for long-polling
+			if !errors.Is(err, context.DeadlineExceeded) {
+				fs.Infof(f, "ChangeNotify: polling error: %v. Waiting %v.", err, interval)
+				time.Sleep(interval)
+			}
+			continue
+		}
+
+		// If result is not 0, reset DiffID to resync
+		if result.Result != 0 {
+			diffID = 0
+			time.Sleep(2 * time.Second)
+			continue
+		}
+
+		if result.DiffID != 0 {
+			diffID = result.DiffID
+			f.lastDiffID = diffID
+		}
+
+		if len(result.Entries) > 0 {
+			handleChanges(result.Entries)
+		}
+	}
+}
+
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
 	// EU region supports SHA1 and SHA256 (but rclone doesn't
@@ -1401,6 +1533,7 @@ var (
 	_ fs.ListPer    = (*Fs)(nil)
 	_ fs.Abouter    = (*Fs)(nil)
 	_ fs.Shutdowner = (*Fs)(nil)
+	_ fs.ChangeNotifier = (*Fs)(nil)
 	_ fs.Object     = (*Object)(nil)
 	_ fs.IDer       = (*Object)(nil)
 )
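For orientation, a sketch of the long-poll exchange the loop above drives. The parameter names (`last`, `diffid`, `block`) and field names come from the code and the `DiffResult` struct; the concrete values are invented:

```text
GET /diff?last=0              -> {"result": 0, "diffid": 41}
GET /diff?diffid=41&block=1   -> (server blocks until something changes)
                                 {"result": 0, "diffid": 42, "entries": [
                                   {"metadata": {"name": "a.txt",
                                                 "parentfolderid": 12345,
                                                 "isfolder": false}}]}
```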
@@ -311,6 +311,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
+		mtime:  srcObj.mtime,
+		size:   srcObj.size,
 	}
 	fromFullPath := path.Join(src.Fs().Root(), srcObj.remote)
 	toFullPath := path.Join(f.root, remote)
@@ -367,7 +369,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 		return fs.ErrorDirExists
 	}
 
-	err := f.ensureParentDirectories(ctx, dstRemote)
+	fullPathSrc := f.buildFullPath(srcRemote)
+	fullPathSrcUnencoded, err := url.QueryUnescape(fullPathSrc)
+	if err != nil {
+		return err
+	}
+
+	fullPathDstUnencoded, err := url.QueryUnescape(fullPath)
+	if err != nil {
+		return err
+	}
+
+	err = f.ensureParentDirectories(ctx, dstRemote)
 	if err != nil {
 		return err
 	}
@@ -378,6 +391,15 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	}
 
 	_, err = f.Move(ctx, o, dstRemote)
+
+	if err == nil {
+
+		f.createdDirMu.Lock()
+		f.createdDirs[fullPathSrcUnencoded] = false
+		f.createdDirs[fullPathDstUnencoded] = true
+		f.createdDirMu.Unlock()
+	}
+
 	return err
 }
 
@@ -26,6 +26,10 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the recursion.
 The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
 use |-R| to make them recurse.
 
+List commands prefer a recursive method that uses more memory but fewer
+transactions by default. Use |--disable ListR| to suppress the behavior.
+See [|--fast-list|](/docs/#fast-list) for more details.
+
 Listing a nonexistent directory will produce an error except for
 remotes which can't have empty directories (e.g. s3, swift, or gcs -
 the bucket-based remotes).`, "|", "`")
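A hypothetical pair of invocations illustrating the new paragraph (the remote and path are placeholders):

```console
rclone ls remote:dir                   # may use the recursive ListR method
rclone ls --disable ListR remote:dir   # force directory-by-directory listing
```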
@@ -13,6 +13,26 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)). If
 `--auth-key` is not provided then `serve s3` will allow anonymous
 access.
 
+Like all rclone flags `--auth-key` can be set via environment
+variables, in this case `RCLONE_AUTH_KEY`. Since this flag can be
+repeated, the input to `RCLONE_AUTH_KEY` is CSV encoded. Because the
+`accessKey,secretKey` pair contains a comma, it needs to be in
+quotes.
+
+```console
+export RCLONE_AUTH_KEY='"user,pass"'
+rclone serve s3 ...
+```
+
+Or to supply multiple identities:
+
+```console
+export RCLONE_AUTH_KEY='"user1,pass1","user2,pass2"'
+rclone serve s3 ...
+```
+
+Setting this variable without quotes will produce an error.
+
 Please note that some clients may require HTTPS endpoints. See [the
 SSL docs](#tls-ssl) for more information.
 
@@ -70,6 +70,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options)
 		w.s3Secret = getAuthSecret(opt.AuthKey)
 	}
 
+	authList, err := authlistResolver(opt.AuthKey)
+	if err != nil {
+		return nil, fmt.Errorf("parsing auth list failed: %q", err)
+	}
+
 	var newLogger logger
 	w.faker = gofakes3.New(
 		newBackend(w),
@@ -77,7 +82,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options)
 		gofakes3.WithLogger(newLogger),
 		gofakes3.WithRequestID(rand.Uint64()),
 		gofakes3.WithoutVersioning(),
-		gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
+		gofakes3.WithV4Auth(authList),
 		gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
 	)
 
@@ -92,7 +97,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options)
 		w._vfs = vfs.New(f, vfsOpt)
 
 		if len(opt.AuthKey) > 0 {
-			w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
+			w.faker.AddAuthKeys(authList)
 		}
 	}
 
@@ -3,6 +3,7 @@ package s3
 import (
 	"context"
 	"encoding/hex"
+	"errors"
 	"io"
 	"os"
 	"path"
@@ -125,15 +126,14 @@ func rmdirRecursive(p string, VFS *vfs.VFS) {
 		}
 	}
 }
 
-func authlistResolver(list []string) map[string]string {
+func authlistResolver(list []string) (map[string]string, error) {
 	authList := make(map[string]string)
 	for _, v := range list {
 		parts := strings.Split(v, ",")
 		if len(parts) != 2 {
-			fs.Infof(nil, "Ignored: invalid auth pair %s", v)
-			continue
+			return nil, errors.New("invalid auth pair: expecting a single comma")
 		}
 		authList[parts[0]] = parts[1]
 	}
-	return authList
+	return authList, nil
 }
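A minimal sketch of the new behaviour as a hypothetical in-package test (not part of the change) - malformed pairs now fail hard instead of being logged and skipped:

```go
package s3

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Hypothetical test illustrating the new signature of authlistResolver.
func TestAuthlistResolverExamples(t *testing.T) {
	// Well-formed pairs parse into a map keyed by access key.
	authList, err := authlistResolver([]string{"user1,pass1", "user2,pass2"})
	require.NoError(t, err)
	assert.Equal(t, map[string]string{"user1": "pass1", "user2": "pass2"}, authList)

	// A pair without a comma is now a hard error.
	_, err = authlistResolver([]string{"user1"})
	assert.Error(t, err)
}
```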
@@ -1058,3 +1058,5 @@ put them back in again. -->
 - Tingsong Xu <tingsong.xu@rightcapital.com>
 - Jonas Tingeborn <134889+jojje@users.noreply.github.com>
 - jhasse-shade <jacob@shade.inc>
+- vyv03354 <VYV03354@nifty.ne.jp>
+- masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>
@@ -283,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scenarios.
 All copy commands send the following 4 requests:
 
 ```text
-/b2api/v1/b2_authorize_account
+/b2api/v4/b2_authorize_account
 /b2api/v1/b2_create_bucket
 /b2api/v1/b2_list_buckets
 /b2api/v1/b2_list_file_names
@@ -3278,6 +3278,10 @@ The available flags are:
 - `mapper` dumps the JSON blobs being sent to the program supplied with
   `--metadata-mapper` and received from it. It can be useful for debugging
   the metadata mapper interface.
+- `curl` dumps the HTTP request as a `curl` command. Can be used with
+  the other HTTP debugging flags (e.g. `requests`, `bodies`). By
+  default the auth will be masked - use with `auth` to have the curl
+  commands with authentication too.
 
 ## Filtering
 
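A hypothetical invocation of the new flag value (the remote name is a placeholder):

```console
rclone lsf --dump curl remote:        # curl commands with auth masked
rclone lsf --dump curl,auth remote:   # keep real Authorization headers
```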
@@ -173,6 +173,31 @@ So if the folder you want rclone to use is "My Music/", then use the returned
 id from ```rclone lsf``` command (ex. `dxxxxxxxx2`) as the `root_folder_id` variable
 value in the config file.
 
+### Change notifications and mounts
+
+The pCloud backend supports real‑time updates for rclone mounts via change
+notifications. rclone uses pCloud's diff long‑polling API to detect changes and
+will automatically refresh directory listings in the mounted filesystem when
+changes occur.
+
+Notes and behavior:
+
+- Works automatically when using `rclone mount` and requires no additional
+  configuration.
+- Notifications are directory‑scoped: when rclone detects a change, it refreshes
+  the affected directory so new/removed/renamed files become visible promptly.
+- Updates are near real‑time. The backend uses a long‑poll with short fallback
+  polling intervals, so you should see changes appear quickly without manual
+  refreshes.
+
+If you want to debug or verify notifications, you can use the helper command:
+
+```bash
+rclone test changenotify remote:
+```
+
+This will log incoming change notifications for the given remote.
+
 <!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/pcloud/pcloud.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
 ### Standard options
 
@@ -14,6 +14,7 @@ const (
 	DumpGoRoutines
 	DumpOpenFiles
 	DumpMapper
+	DumpCurl
 )
 
 type dumpChoices struct{}
@@ -29,6 +30,7 @@ func (dumpChoices) Choices() []BitsChoicesInfo {
 		{uint64(DumpGoRoutines), "goroutines"},
 		{uint64(DumpOpenFiles), "openfiles"},
 		{uint64(DumpMapper), "mapper"},
+		{uint64(DumpCurl), "curl"},
 	}
 }
 
@@ -15,6 +15,8 @@ import (
 	"net/http/httputil"
 	"net/url"
 	"os"
+	"slices"
+	"strings"
 	"sync"
 	"time"
 
@@ -24,6 +26,7 @@ import (
 	"github.com/rclone/rclone/lib/structs"
 	"github.com/youmark/pkcs8"
 	"golang.org/x/net/publicsuffix"
+	"moul.io/http2curl/v2"
 )
 
 const (
@@ -439,6 +442,18 @@ func cleanAuths(buf []byte) []byte {
 	return buf
 }
 
+// cleanCurl gets rid of Auth headers in a curl command
+func cleanCurl(cmd *http2curl.CurlCommand) {
+	for _, authBuf := range authBufs {
+		auth := "'" + string(authBuf)
+		for i, arg := range *cmd {
+			if strings.HasPrefix(arg, auth) {
+				(*cmd)[i] = auth + "XXXX'"
+			}
+		}
+	}
+}
+
 var expireWindow = 30 * time.Second
 
 func isCertificateExpired(cc *tls.Config) bool {
@@ -492,6 +507,26 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
 		fs.Debugf(nil, "%s", separatorReq)
 		logMutex.Unlock()
 	}
+	// Dump curl request
+	if t.dump&(fs.DumpCurl) != 0 {
+		cmd, err := http2curl.GetCurlCommand(req)
+		if err != nil {
+			fs.Debugf(nil, "Failed to create curl command: %v", err)
+		} else {
+			// Patch -X HEAD into --head
+			for i := range len(*cmd) - 1 {
+				if (*cmd)[i] == "-X" && (*cmd)[i+1] == "'HEAD'" {
+					(*cmd)[i] = "--head"
+					*cmd = slices.Delete(*cmd, i+1, i+2)
+					break
+				}
+			}
+			if t.dump&fs.DumpAuth == 0 {
+				cleanCurl(cmd)
+			}
+			fs.Debugf(nil, "HTTP REQUEST: %v", cmd)
+		}
+	}
 	// Do round trip
 	resp, err = t.Transport.RoundTrip(req)
 	// Logf response
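To show the intended effect of `cleanCurl`, a rough sketch of the masked debug output - the exact formatting comes from http2curl and the URL here is invented:

```text
DEBUG : HTTP REQUEST: curl -X 'GET' -H 'Authorization: XXXX' 'https://api.example.com/b2api/v1/b2_list_buckets'
```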
@@ -19,6 +19,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"moul.io/http2curl/v2"
 )
 
 func TestCleanAuth(t *testing.T) {
@@ -61,6 +62,32 @@ func TestCleanAuths(t *testing.T) {
 	}
 }
 
+func TestCleanCurl(t *testing.T) {
+	for _, test := range []struct {
+		in   []string
+		want []string
+	}{{
+		[]string{""},
+		[]string{""},
+	}, {
+		[]string{"floo"},
+		[]string{"floo"},
+	}, {
+		[]string{"'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
+		[]string{"'Authorization: XXXX'", "'Potato: Help'", ""},
+	}, {
+		[]string{"'X-Auth-Token: AAAAAAAAA'", "'Potato: Help'", ""},
+		[]string{"'X-Auth-Token: XXXX'", "'Potato: Help'", ""},
+	}, {
+		[]string{"'X-Auth-Token: AAAAAAAAA'", "'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
+		[]string{"'X-Auth-Token: XXXX'", "'Authorization: XXXX'", "'Potato: Help'", ""},
+	}} {
+		in := http2curl.CurlCommand(test.in)
+		cleanCurl(&in)
+		assert.Equal(t, test.want, test.in, test.in)
+	}
+}
+
 var certSerial = int64(0)
 
 // Create a test certificate and key pair that is valid for a specific