Mirror of https://github.com/rclone/rclone.git (synced 2026-01-22 12:23:15 +00:00)

Compare commits: pr-8538-tr ... v1.70-stab (83 commits)
Commits compared (SHA1):

4133a197bc, a30a4909fe, cdc6d22929, e319406f52, ac54cccced, 4c4d366e29, 64fc3d05ae, 90386efeb1, 5f78b47295, 775ee90fa5,
444392bf9c, d36259749f, 4010380ea8, c138e52a57, e22ce597ad, 79bd9e7913, 32f9393ac8, f97c876eb1, 9b43836e19, ff817e8764,
3c63dec849, 33876c5806, fa3b444341, e5fc424955, 06badeffa3, eb71d1be18, 7506a3c84c, 831abd3406, 9c08cd80c7, 948db193a2,
72bc3f5079, bf8a428fbd, 05cc6f829b, af73833773, 3167a63780, 1d9795daa6, 03ea89adf0, 9d464e8e9a, 92fea7eb1b, f226d12a2f,
359260c49d, 125c8a98bb, 81fccd9c39, 1dc3421c7f, 073184132e, 476ff65fd7, 2847412433, 5c81132da0, 6e1c7b9239, e469c8974c,
629b427443, 108504963c, 6aa09fb1d6, bfa6852334, 63d55d4a39, 578ee49550, dda6a863e9, 99358cee88, 768a4236e6, ffbf002ba8,
4a1b5b864c, 3b3096c940, 51fd697c7a, 210acb42cd, 6c36615efe, d4e2717081, 013c563293, 41a407dcc9, cf1f5a7af6, 597872e5d7,
e2d6872745, ddebca8d42, 5173ca0454, ccac9813f3, 9133fd03df, 2e891f4ff8, 3c66d9ccb1, badf16cc34, 0ee7cd80f2, aeb43c6a4c,
12322a2141, 4fd5a3d0a2, 3594330177
MANUAL.html (generated, 4031 lines): file diff suppressed because it is too large.
MANUAL.txt (generated, 3369 lines): file diff suppressed because it is too large.
@@ -39,7 +9,9 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* FileLu [:page_facing_up:](https://rclone.org/filelu/)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
@@ -64,7 +66,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* MEGA [:page_facing_up:](https://rclone.org/mega/)
* MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
@@ -94,6 +97,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
@@ -14,10 +14,12 @@ import (
    _ "github.com/rclone/rclone/backend/combine"
    _ "github.com/rclone/rclone/backend/compress"
    _ "github.com/rclone/rclone/backend/crypt"
    _ "github.com/rclone/rclone/backend/doi"
    _ "github.com/rclone/rclone/backend/drive"
    _ "github.com/rclone/rclone/backend/dropbox"
    _ "github.com/rclone/rclone/backend/fichier"
    _ "github.com/rclone/rclone/backend/filefabric"
    _ "github.com/rclone/rclone/backend/filelu"
    _ "github.com/rclone/rclone/backend/filescom"
    _ "github.com/rclone/rclone/backend/ftp"
    _ "github.com/rclone/rclone/backend/gofile"
@@ -72,6 +72,7 @@ const (
    emulatorAccount      = "devstoreaccount1"
    emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
    emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
    sasCopyValidity      = time.Hour // how long SAS should last when doing server side copy
)

var (
@@ -559,6 +560,11 @@ type Fs struct {
    pacer        *fs.Pacer                  // To pace and retry the API calls
    uploadToken  *pacer.TokenDispenser      // control concurrency
    publicAccess container.PublicAccessType // Container Public Access Level

    // user delegation cache
    userDelegationMu     sync.Mutex
    userDelegation       *service.UserDelegationCredential
    userDelegationExpiry time.Time
}

// Object describes an azure object
@@ -612,6 +618,9 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
    containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
    if f.opt.DirectoryMarkers && strings.HasSuffix(containerPath, "//") {
        containerPath = containerPath[:len(containerPath)-1]
    }
    return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}

@@ -928,6 +937,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    }
    case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
        // User with username and password
        //nolint:staticcheck // this is deprecated due to Azure policy
        options := azidentity.UsernamePasswordCredentialOptions{
            ClientOptions: policyClientOptions,
        }
@@ -1213,7 +1223,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
            continue
        }
        // process directory markers as directories
        remote = strings.TrimRight(remote, "/")
        remote, _ = strings.CutSuffix(remote, "/")
    }
    remote = remote[len(prefix):]
    if addContainer {
@@ -1534,7 +1544,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
    remote = strings.TrimRight(remote, "/")
    remote, _ = strings.CutSuffix(remote, "/")
    dir := path.Dir(remote)
    if dir == "/" || dir == "." {
        dir = ""
@@ -1684,6 +1694,38 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
    return f.deleteContainer(ctx, container)
}

// Get a user delegation which is valid for at least sasCopyValidity
//
// This value is cached in f
func (f *Fs) getUserDelegation(ctx context.Context) (*service.UserDelegationCredential, error) {
    f.userDelegationMu.Lock()
    defer f.userDelegationMu.Unlock()

    if f.userDelegation != nil && time.Until(f.userDelegationExpiry) > sasCopyValidity {
        return f.userDelegation, nil
    }

    // Validity window
    start := time.Now().UTC()
    expiry := start.Add(2 * sasCopyValidity)
    startStr := start.Format(time.RFC3339)
    expiryStr := expiry.Format(time.RFC3339)

    // Acquire user delegation key from the service client
    info := service.KeyInfo{
        Start:  &startStr,
        Expiry: &expiryStr,
    }
    userDelegationKey, err := f.svc.GetUserDelegationCredential(ctx, info, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to get user delegation key: %w", err)
    }

    f.userDelegation = userDelegationKey
    f.userDelegationExpiry = expiry
    return f.userDelegation, nil
}

// getAuth gets auth to copy o.
//
// tokenOK is used to signal that token based auth (Microsoft Entra
@@ -1695,7 +1737,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// URL (not a SAS) and token will be empty.
//
// If tokenOK is true it may also return a token for the auth.
func (o *Object) getAuth(ctx context.Context, tokenOK bool, noAuth bool) (srcURL string, token *string, err error) {
func (o *Object) getAuth(ctx context.Context, noAuth bool) (srcURL string, err error) {
    f := o.fs
    srcBlobSVC := o.getBlobSVC()
    srcURL = srcBlobSVC.URL()
@@ -1704,29 +1746,47 @@ func (o *Object) getAuth(ctx context.Context, tokenOK bool, noAuth bool) (srcURL
    case noAuth:
        // If same storage account then no auth needed
    case f.cred != nil:
        if !tokenOK {
            return srcURL, token, errors.New("not supported: Microsoft Entra ID")
        }
        options := policy.TokenRequestOptions{}
        accessToken, err := f.cred.GetToken(ctx, options)
        // Generate a User Delegation SAS URL using Azure AD credentials
        userDelegationKey, err := f.getUserDelegation(ctx)
        if err != nil {
            return srcURL, token, fmt.Errorf("failed to create access token: %w", err)
            return "", fmt.Errorf("sas creation: %w", err)
        }
        token = &accessToken.Token

        // Build the SAS values
        perms := sas.BlobPermissions{Read: true}
        container, containerPath := o.split()
        start := time.Now().UTC()
        expiry := start.Add(sasCopyValidity)
        vals := sas.BlobSignatureValues{
            StartTime:     start,
            ExpiryTime:    expiry,
            Permissions:   perms.String(),
            ContainerName: container,
            BlobName:      containerPath,
        }

        // Sign with the delegation key
        queryParameters, err := vals.SignWithUserDelegation(userDelegationKey)
        if err != nil {
            return "", fmt.Errorf("signing SAS with user delegation failed: %w", err)
        }

        // Append the SAS to the URL
        srcURL = srcBlobSVC.URL() + "?" + queryParameters.Encode()
    case f.sharedKeyCred != nil:
        // Generate a short lived SAS URL if using shared key credentials
        expiry := time.Now().Add(time.Hour)
        expiry := time.Now().Add(sasCopyValidity)
        sasOptions := blob.GetSASURLOptions{}
        srcURL, err = srcBlobSVC.GetSASURL(sas.BlobPermissions{Read: true}, expiry, &sasOptions)
        if err != nil {
            return srcURL, token, fmt.Errorf("failed to create SAS URL: %w", err)
            return srcURL, fmt.Errorf("failed to create SAS URL: %w", err)
        }
    case f.anonymous || f.opt.SASURL != "":
        // If using a SASURL or anonymous, no need for any extra auth
    default:
        return srcURL, token, errors.New("unknown authentication type")
        return srcURL, errors.New("unknown authentication type")
    }
    return srcURL, token, nil
    return srcURL, nil
}

// Do multipart parallel copy.
@@ -1747,7 +1807,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
    o.fs = f
    o.remote = remote

    srcURL, token, err := src.getAuth(ctx, true, false)
    srcURL, err := src.getAuth(ctx, false)
    if err != nil {
        return nil, fmt.Errorf("multipart copy: %w", err)
    }
@@ -1768,7 +1828,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
    var (
        srcSize  = src.size
        partSize = int64(chunksize.Calculator(o, src.size, blockblob.MaxBlocks, f.opt.ChunkSize))
        numParts = (srcSize-1)/partSize + 1
        numParts = (srcSize + partSize - 1) / partSize
        blockIDs = make([]string, numParts) // list of blocks for finalize
        g, gCtx  = errgroup.WithContext(ctx)
        checker  = newCheckForInvalidBlockOrBlob("copy", o)
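The replacement expression for numParts above is the usual ceiling-division idiom. A tiny standalone sketch (illustrative only, not part of the change; the helper name is hypothetical) showing how the two forms compare:

    // partCounts contrasts the old and new part-count expressions.
    // For srcSize > 0 both return ceil(srcSize/partSize); for srcSize == 0 the
    // old form still yields 1 while the new form yields 0.
    func partCounts(srcSize, partSize int64) (oldParts, newParts int64) {
        oldParts = (srcSize-1)/partSize + 1
        newParts = (srcSize + partSize - 1) / partSize
        return oldParts, newParts
    }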
@@ -1791,7 +1851,8 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
            Count: partSize,
        },
        // Specifies the authorization scheme and signature for the copy source.
        CopySourceAuthorization: token,
        // We use SAS URLs as this doesn't seem to work always
        // CopySourceAuthorization: token,
        // CPKInfo *blob.CPKInfo
        // CPKScopeInfo *blob.CPKScopeInfo
    }
@@ -1861,7 +1922,7 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
    dstBlobSVC := f.getBlobSVC(dstContainer, dstPath)

    // Get the source auth - none needed for same storage account
    srcURL, _, err := src.getAuth(ctx, false, f == src.fs)
    srcURL, err := src.getAuth(ctx, f == src.fs)
    if err != nil {
        return nil, fmt.Errorf("single part copy: source auth: %w", err)
    }
@@ -2176,11 +2237,6 @@ func (o *Object) getTags() (tags map[string]string) {
// getBlobSVC creates a blob client
func (o *Object) getBlobSVC() *blob.Client {
    container, directory := o.split()
    // If we are trying to remove an all / directory marker then
    // this will have one / too many now.
    if bucket.IsAllSlashes(o.remote) {
        directory = strings.TrimSuffix(directory, "/")
    }
    return o.fs.getBlobSVC(container, directory)
}

@@ -2863,6 +2919,9 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
            return ui, err
        }
    }
    // if ui.isDirMarker && strings.HasSuffix(containerPath, "//") {
    //     containerPath = containerPath[:len(containerPath)-1]
    // }

    // Update Mod time
    o.updateMetadataWithModTime(src.ModTime(ctx))
@@ -516,6 +516,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
    }
    case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
        // User with username and password
        //nolint:staticcheck // this is deprecated due to Azure policy
        options := azidentity.UsernamePasswordCredentialOptions{
            ClientOptions: policyClientOptions,
        }
@@ -921,7 +922,7 @@ func (o *Object) setMetadata(resp *file.GetPropertiesResponse) {
    }
}

// readMetaData gets the metadata if it hasn't already been fetched
// getMetadata gets the metadata if it hasn't already been fetched
func (o *Object) getMetadata(ctx context.Context) error {
    resp, err := o.fileClient().GetProperties(ctx, nil)
    if err != nil {
@@ -1673,6 +1673,21 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
            return o.getMetaDataListing(ctx)
        }
    }

    // If using versionAt we need to list to find the correct version.
    if o.fs.opt.VersionAt.IsSet() {
        info, err := o.getMetaDataListing(ctx)
        if err != nil {
            return nil, err
        }

        if info.Action == "hide" {
            // Return object not found error if the current version is deleted.
            return nil, fs.ErrorObjectNotFound
        }
        return info, nil
    }

    _, info, err = o.getOrHead(ctx, "HEAD", nil)
    return info, err
}
@@ -1883,9 +1898,14 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
    // --b2-download-url cloudflare strips the Content-Length
    // headers (presumably so it can inject stuff) so use the old
    // length read from the listing.
    // Additionally, the official examples return S3 headers
    // instead of native, i.e. no file ID, use ones from listing.
    if info.Size < 0 {
        info.Size = o.size
    }
    if info.ID == "" {
        info.ID = o.id
    }
    return resp, info, nil
}
@@ -446,14 +446,14 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
                t.Run("List", func(t *testing.T) {
                    fstest.CheckListing(t, f, test.want)
                })
                // b2 NewObject doesn't work with VersionAt
                //t.Run("NewObject", func(t *testing.T) {
                //    gotObj, gotErr := f.NewObject(ctx, fileName)
                //    assert.Equal(t, test.wantErr, gotErr)
                //    if gotErr == nil {
                //        assert.Equal(t, test.wantSize, gotObj.Size())
                //    }
                //})

                t.Run("NewObject", func(t *testing.T) {
                    gotObj, gotErr := f.NewObject(ctx, fileName)
                    assert.Equal(t, test.wantErr, gotErr)
                    if gotErr == nil {
                        assert.Equal(t, test.wantSize, gotObj.Size())
                    }
                })
            })
        }
    })
@@ -1861,6 +1861,8 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,

// baseMove chains to the wrapped Move or simulates it by Copy+Delete
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
    ctx, ci := fs.AddConfig(ctx)
    ci.NameTransform = nil // ensure operations.Move does not double-transform here
    var (
        dest fs.Object
        err  error
@@ -858,7 +858,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
        }
        return wrappedCallback(entries)
    }
    return listP(ctx, dir, wrappedCallback)
    return listP(ctx, uRemote, wrappedCallback)
}

// ListR lists the objects and directories of the Fs starting
backend/doi/api/dataversetypes.go (new file, 38 lines)
@@ -0,0 +1,38 @@
// Type definitions specific to Dataverse

package api

// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
    Status string           `json:"status"`
    Data   DataverseDataset `json:"data"`
}

// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
    LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}

// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
    LastUpdateTime string          `json:"lastUpdateTime"`
    Files          []DataverseFile `json:"files"`
}

// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
    DirectoryLabel string            `json:"directoryLabel"`
    DataFile       DataverseDataFile `json:"dataFile"`
}

// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
    ID                 int64  `json:"id"`
    Filename           string `json:"filename"`
    ContentType        string `json:"contentType"`
    FileSize           int64  `json:"filesize"`
    OriginalFileFormat string `json:"originalFileFormat"`
    OriginalFileSize   int64  `json:"originalFileSize"`
    OriginalFileName   string `json:"originalFileName"`
    MD5                string `json:"md5"`
}
backend/doi/api/inveniotypes.go (new file, 33 lines)
@@ -0,0 +1,33 @@
// Type definitions specific to InvenioRDM

package api

// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
    Links InvenioRecordResponseLinks `json:"links"`
}

// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
    Self string `json:"self"`
}

// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
    Entries []InvenioFilesResponseEntry `json:"entries"`
}

// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
    Key      string                         `json:"key"`
    Checksum string                         `json:"checksum"`
    Size     int64                          `json:"size"`
    Updated  string                         `json:"updated"`
    MimeType string                         `json:"mimetype"`
    Links    InvenioFilesResponseEntryLinks `json:"links"`
}

// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
    Content string `json:"content"`
}
backend/doi/api/types.go (new file, 26 lines)
@@ -0,0 +1,26 @@
// Package api has general type definitions for doi
package api

// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
    ResponseCode int                        `json:"responseCode"`
    Handle       string                     `json:"handle"`
    Values       []DoiResolverResponseValue `json:"values"`
}

// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
    Index     int                          `json:"index"`
    Type      string                       `json:"type"`
    Data      DoiResolverResponseValueData `json:"data"`
    TTL       int                          `json:"ttl"`
    Timestamp string                       `json:"timestamp"`
}

// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
    Format string `json:"format"`
    Value  any    `json:"value"`
}
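A minimal sketch (not part of the change; the JSON values below are made up for illustration) of how a resolver reply decodes into these types:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/rclone/rclone/backend/doi/api"
    )

    func main() {
        // Illustrative resolver-style reply for the handle 10.1000/182
        sample := []byte(`{"responseCode":1,"handle":"10.1000/182","values":[{"index":1,"type":"URL","data":{"format":"string","value":"https://example.org/dataset"},"ttl":86400,"timestamp":"2025-01-01T00:00:00Z"}]}`)
        var resp api.DoiResolverResponse
        if err := json.Unmarshal(sample, &resp); err == nil && resp.ResponseCode == 1 {
            fmt.Println(resp.Values[0].Data.Value) // prints the resolved URL for the handle
        }
    }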
backend/doi/dataverse.go (new file, 112 lines)
@@ -0,0 +1,112 @@
// Implementation for Dataverse

package doi

import (
    "context"
    "fmt"
    "net/http"
    "net/url"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/backend/doi/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
    queryValues := resolvedURL.Query()
    persistentID := queryValues.Get("persistentId")
    return persistentID != ""
}

// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
    queryValues := resolvedURL.Query()
    persistentID := queryValues.Get("persistentId")

    query := url.Values{}
    query.Add("persistentId", persistentID)
    endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})

    return Dataverse, endpointURL, nil
}

// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
    f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
    // Use the cache if populated
    cachedEntries, found := dp.f.cache.GetMaybe("files")
    if found {
        parsedEntries, ok := cachedEntries.([]Object)
        if ok {
            for _, entry := range parsedEntries {
                newEntry := entry
                entries = append(entries, &newEntry)
            }
            return entries, nil
        }
    }

    filesURL := dp.f.endpoint
    var res *http.Response
    var result api.DataverseDatasetResponse
    opts := rest.Opts{
        Method:     "GET",
        Path:       strings.TrimLeft(filesURL.EscapedPath(), "/"),
        Parameters: filesURL.Query(),
    }
    err = dp.f.pacer.Call(func() (bool, error) {
        res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return nil, fmt.Errorf("readDir failed: %w", err)
    }
    modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
    if modTimeErr != nil {
        fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
        modTime = timeUnset
    }
    for _, file := range result.Data.LatestVersion.Files {
        contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
        query := url.Values{}
        query.Add("format", "original")
        contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
        entry := &Object{
            fs:          dp.f,
            remote:      path.Join(file.DirectoryLabel, file.DataFile.Filename),
            contentURL:  contentURL.String(),
            size:        file.DataFile.FileSize,
            modTime:     modTime,
            md5:         file.DataFile.MD5,
            contentType: file.DataFile.ContentType,
        }
        if file.DataFile.OriginalFileName != "" {
            entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
            entry.size = file.DataFile.OriginalFileSize
            entry.contentType = file.DataFile.OriginalFileFormat
        }
        entries = append(entries, entry)
    }
    // Populate the cache
    cacheEntries := []Object{}
    for _, entry := range entries {
        cacheEntries = append(cacheEntries, *entry)
    }
    dp.f.cache.Put("files", cacheEntries)
    return entries, nil
}

func newDataverseProvider(f *Fs) doiProvider {
    return &dataverseProvider{
        f: f,
    }
}
backend/doi/doi.go (new file, 649 lines)
@@ -0,0 +1,649 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/backend/doi/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/cache"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/rest"
)

const (
    // the URL of the DOI resolver
    //
    // Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
    doiResolverAPIURL = "https://doi.org/api"
    minSleep          = 10 * time.Millisecond
    maxSleep          = 2 * time.Second
    decayConstant     = 2 // bigger for slower decay, exponential
)

var (
    errorReadOnly = errors.New("doi remotes are read only")
    timeUnset     = time.Unix(0, 0)
)

func init() {
    fsi := &fs.RegInfo{
        Name:        "doi",
        Description: "DOI datasets",
        NewFs:       NewFs,
        CommandHelp: commandHelp,
        Options: []fs.Option{{
            Name:     "doi",
            Help:     "The DOI or the doi.org URL.",
            Required: true,
        }, {
            Name: fs.ConfigProvider,
            Help: `DOI provider.

The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
            Examples: []fs.OptionExample{
                {
                    Value: "auto",
                    Help:  "Auto-detect provider",
                },
                {
                    Value: string(Zenodo),
                    Help:  "Zenodo",
                }, {
                    Value: string(Dataverse),
                    Help:  "Dataverse",
                }, {
                    Value: string(Invenio),
                    Help:  "Invenio",
                }},
            Required: false,
            Advanced: true,
        }, {
            Name: "doi_resolver_api_url",
            Help: `The URL of the DOI resolver API to use.

The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.

Defaults to "https://doi.org/api".`,
            Required: false,
            Advanced: true,
        }},
    }
    fs.Register(fsi)
}

// Provider defines the type of provider hosting the DOI
type Provider string

const (
    // Zenodo provider, see https://zenodo.org
    Zenodo Provider = "zenodo"
    // Dataverse provider, see https://dataverse.harvard.edu
    Dataverse Provider = "dataverse"
    // Invenio provider, see https://inveniordm.docs.cern.ch
    Invenio Provider = "invenio"
)

// Options defines the configuration for this backend
type Options struct {
    Doi               string `config:"doi"`                  // The DOI, a digital identifier of an object, usually a dataset
    Provider          string `config:"provider"`             // The DOI provider
    DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
    name        string         // name of this remote
    root        string         // the path we are working on
    provider    Provider       // the DOI provider
    doiProvider doiProvider    // the interface used to interact with the DOI provider
    features    *fs.Features   // optional features
    opt         Options        // options for this backend
    ci          *fs.ConfigInfo // global config
    endpoint    *url.URL       // the main API endpoint for this remote
    endpointURL string         // endpoint as a string
    srv         *rest.Client   // the connection to the server
    pacer       *fs.Pacer      // pacer for API calls
    cache       *cache.Cache   // a cache for the remote metadata
}

// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
    fs          *Fs       // what this object is part of
    remote      string    // the remote path
    contentURL  string    // the URL where the contents of the file can be downloaded
    size        int64     // size of the object
    modTime     time.Time // modification time of the object
    contentType string    // content type of the object
    md5         string    // MD5 hash of the object content
}

// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
    // ListEntries returns the full list of entries found at the remote, regardless of root
    ListEntries(ctx context.Context) (entries []*Object, err error)
}

// Parse the input string as a DOI
// Examples:
//   10.1000/182 -> 10.1000/182
//   https://doi.org/10.1000/182 -> 10.1000/182
//   doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
    doiURL, err := url.Parse(doi)
    if err != nil {
        return doi
    }
    if doiURL.Scheme == "doi" {
        return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
    }
    if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
        return strings.TrimLeft(doiURL.Path, "/")
    }
    return doi
}

// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
    resolverURL := opt.DoiResolverAPIURL
    if resolverURL == "" {
        resolverURL = doiResolverAPIURL
    }

    var result api.DoiResolverResponse
    params := url.Values{}
    params.Add("index", "1")
    opts := rest.Opts{
        Method:     "GET",
        RootURL:    resolverURL,
        Path:       "/handles/" + opt.Doi,
        Parameters: params,
    }
    err = pacer.Call(func() (bool, error) {
        res, err := srv.CallJSON(ctx, &opts, nil, &result)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return nil, err
    }

    if result.ResponseCode != 1 {
        return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
    }
    resolvedURLStr := ""
    for _, value := range result.Values {
        if value.Type == "URL" && value.Data.Format == "string" {
            valueStr, ok := value.Data.Value.(string)
            if !ok {
                return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
            }
            resolvedURLStr = valueStr
        }
    }
    resolvedURL, err := url.Parse(resolvedURLStr)
    if err != nil {
        return nil, err
    }
    return resolvedURL, nil
}

// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
    resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
    if err != nil {
        return "", nil, err
    }

    switch opt.Provider {
    case string(Dataverse):
        return resolveDataverseEndpoint(resolvedURL)
    case string(Invenio):
        return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
    case string(Zenodo):
        return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
    }

    hostname := strings.ToLower(resolvedURL.Hostname())
    if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
        return resolveDataverseEndpoint(resolvedURL)
    }
    if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
        return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
    }
    if activateInvenio(ctx, srv, pacer, resolvedURL) {
        return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
    }

    return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}

// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
    provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
    if err != nil {
        return false, err
    }

    // Update f with the new parameters
    f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
    f.endpoint = endpoint
    f.endpointURL = endpoint.String()
    f.provider = provider
    f.opt.Provider = string(provider)

    switch f.provider {
    case Dataverse:
        f.doiProvider = newDataverseProvider(f)
    case Invenio, Zenodo:
        f.doiProvider = newInvenioProvider(f)
    default:
        return false, fmt.Errorf("provider type '%s' not supported", f.provider)
    }

    // Determine if the root is a file
    entries, err := f.doiProvider.ListEntries(ctx)
    if err != nil {
        return false, err
    }
    for _, entry := range entries {
        if entry.remote == f.root {
            isFile = true
            break
        }
    }
    return isFile, nil
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    429, // Too Many Requests.
    500, // Internal Server Error
    502, // Bad Gateway
    503, // Service Unavailable
    504, // Gateway Timeout
    509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    root = strings.Trim(root, "/")

    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    opt.Doi = parseDoi(opt.Doi)

    client := fshttp.NewClient(ctx)
    ci := fs.GetConfig(ctx)
    f := &Fs{
        name:  name,
        root:  root,
        opt:   *opt,
        ci:    ci,
        srv:   rest.NewClient(client),
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        cache: cache.New(),
    }
    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f)

    isFile, err := f.httpConnection(ctx, opt)
    if err != nil {
        return nil, err
    }

    if isFile {
        // return an error with an fs which points to the parent
        newRoot := path.Dir(f.root)
        if newRoot == "." {
            newRoot = ""
        }
        f.root = newRoot
        return f, fs.ErrorIsFile
    }

    return f, nil
}

// Name returns the configured name of the file system
func (f *Fs) Name() string {
    return f.name
}

// Root returns the root for the filesystem
func (f *Fs) Root() string {
    return f.root
}

// String returns the URL for the filesystem
func (f *Fs) String() string {
    return fmt.Sprintf("DOI %s", f.opt.Doi)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Hashes returns the supported hash types of the filesystem (MD5)
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.MD5)
    // return hash.Set(hash.None)
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return errorReadOnly
}

// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
    return errorReadOnly
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return errorReadOnly
}

// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    entries, err := f.doiProvider.ListEntries(ctx)
    if err != nil {
        return nil, err
    }

    remoteFullPath := remote
    if f.root != "" {
        remoteFullPath = path.Join(f.root, remote)
    }

    for _, entry := range entries {
        if entry.Remote() == remoteFullPath {
            return entry, nil
        }
    }

    return nil, fs.ErrorObjectNotFound
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    fileEntries, err := f.doiProvider.ListEntries(ctx)
    if err != nil {
        return nil, fmt.Errorf("error listing %q: %w", dir, err)
    }

    fullDir := path.Join(f.root, dir)
    if fullDir != "" {
        fullDir += "/"
    }

    dirPaths := map[string]bool{}
    for _, entry := range fileEntries {
        // First, filter out files not in `fullDir`
        if !strings.HasPrefix(entry.remote, fullDir) {
            continue
        }
        // Then, find entries in subfolders
        remotePath := entry.remote
        if fullDir != "" {
            remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
        }
        parts := strings.SplitN(remotePath, "/", 2)
        if len(parts) == 1 {
            newEntry := *entry
            newEntry.remote = path.Join(dir, remotePath)
            entries = append(entries, &newEntry)
        } else {
            dirPaths[path.Join(dir, parts[0])] = true
        }
    }

    for dirPath := range dirPaths {
        entry := fs.NewDir(dirPath, time.Time{})
        entries = append(entries, entry)
    }

    return entries, nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return nil, errorReadOnly
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return nil, errorReadOnly
}

// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
    return o.fs
}

// String returns the URL to the remote HTTP file
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}

// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
    return o.remote
}

// Hash returns the MD5 of an object, or hash.ErrUnsupported for other hash types
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.MD5 {
        return "", hash.ErrUnsupported
    }
    return o.md5, nil
}

// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
    return o.size
}

// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.modTime
}

// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    return errorReadOnly
}

// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
    return true
}

// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    fs.FixRangeOption(options, o.size)
    opts := rest.Opts{
        Method:  "GET",
        RootURL: o.contentURL,
        Options: options,
    }
    var res *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        res, err = o.fs.srv.Call(ctx, &opts)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return nil, fmt.Errorf("Open failed: %w", err)
    }

    // Handle non-compliant redirects
    if res.Header.Get("Location") != "" {
        newURL, err := res.Location()
        if err == nil {
            opts.RootURL = newURL.String()
            err = o.fs.pacer.Call(func() (bool, error) {
                res, err = o.fs.srv.Call(ctx, &opts)
                return shouldRetry(ctx, res, err)
            })
            if err != nil {
                return nil, fmt.Errorf("Open failed: %w", err)
            }
        }
    }

    return res.Body, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    return o.contentType
}

var commandHelp = []fs.CommandHelp{{
    Name:  "metadata",
    Short: "Show metadata about the DOI.",
    Long: `This command returns a JSON object with some information about the DOI.

    rclone backend metadata doi:

It returns a JSON object representing metadata about the DOI.
`,
}, {
    Name:  "set",
    Short: "Set command for updating the config parameters.",
    Long: `This set command can be used to update the config parameters
for a running doi backend.

Usage Examples:

    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI

The option keys are named as they are in the config file.

This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.

It doesn't return anything.
`,
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
    switch name {
    case "metadata":
        return f.ShowMetadata(ctx)
    case "set":
        newOpt := f.opt
        err := configstruct.Set(configmap.Simple(opt), &newOpt)
        if err != nil {
            return nil, fmt.Errorf("reading config: %w", err)
        }
        _, err = f.httpConnection(ctx, &newOpt)
        if err != nil {
            return nil, fmt.Errorf("updating session: %w", err)
        }
        f.opt = newOpt
        keys := []string{}
        for k := range opt {
            keys = append(keys, k)
        }
        fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
        return nil, nil
    default:
        return nil, fs.ErrorCommandNotFound
    }
}

// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
    doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
    if err != nil {
        return nil, err
    }

    info := map[string]any{}
    info["DOI"] = f.opt.Doi
    info["URL"] = doiURL.String()
    info["metadataURL"] = f.endpointURL
    info["provider"] = f.provider
    return info, nil
}

// Check the interfaces are satisfied
var (
    _ fs.Fs          = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.Commander   = (*Fs)(nil)
    _ fs.Object      = (*Object)(nil)
    _ fs.MimeTyper   = (*Object)(nil)
)
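A possible command-line session against the new backend, assuming it is compiled in; the remote name "mydoi" and the destination directory are placeholders, the DOI is the one used by the tests, and exact config syntax may differ by rclone version:

    rclone config create mydoi doi doi=10.5281/zenodo.2600782
    rclone lsf mydoi:
    rclone copy mydoi: /tmp/dataset
    rclone backend metadata mydoi: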
260
backend/doi/doi_internal_test.go
Normal file
260
backend/doi/doi_internal_test.go
Normal file
@@ -0,0 +1,260 @@
|
||||
package doi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/doi/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var remoteName = "TestDoi"
|
||||
|
||||
func TestParseDoi(t *testing.T) {
|
||||
// 10.1000/182 -> 10.1000/182
|
||||
doi := "10.1000/182"
|
||||
parsed := parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
|
||||
// https://doi.org/10.1000/182 -> 10.1000/182
|
||||
doi = "https://doi.org/10.1000/182"
|
||||
parsed = parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
|
||||
// https://dx.doi.org/10.1000/182 -> 10.1000/182
|
||||
doi = "https://dxdoi.org/10.1000/182"
|
||||
parsed = parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
|
||||
// doi:10.1000/182 -> 10.1000/182
|
||||
doi = "doi:10.1000/182"
|
||||
parsed = parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
|
||||
// doi://10.1000/182 -> 10.1000/182
|
||||
doi = "doi://10.1000/182"
|
||||
parsed = parseDoi(doi)
|
||||
assert.Equal(t, "10.1000/182", parsed)
|
||||
}
|
||||
|
||||
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
|
||||
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// Handle requests for resolving DOIs
|
||||
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check that we are resolving a DOI
|
||||
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
|
||||
assert.NotEmpty(t, handle)
|
||||
index := r.URL.Query().Get("index")
|
||||
assert.Equal(t, "1", index)
|
||||
|
||||
// Return the most basic response
|
||||
result := api.DoiResolverResponse{
|
||||
ResponseCode: 1,
|
||||
Handle: handle,
|
||||
Values: []api.DoiResolverResponseValue{
|
||||
{
|
||||
Index: 1,
|
||||
Type: "URL",
|
||||
Data: api.DoiResolverResponseValueData{
|
||||
Format: "string",
|
||||
Value: resolvedURL,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
resultBytes, err := json.Marshal(result)
|
||||
require.NoError(t, err)
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
_, err = w.Write(resultBytes)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
// Make the test server
|
||||
ts := httptest.NewServer(mux)
|
||||
|
||||
// Close the server at the end of the test
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
return ts.URL + "/api"
|
||||
}
|
||||
|
||||
func md5Sum(text string) string {
|
||||
hash := md5.Sum([]byte(text))
|
||||
return hex.EncodeToString(hash[:])
|
||||
}
|
||||
|
||||
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
|
||||
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
// Handle requests for a single record
|
||||
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check that we are returning data about a single record
|
||||
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
|
||||
assert.NotEmpty(t, recordID)
|
||||
|
||||
// Return the most basic response
|
||||
selfURL, err := url.Parse("http://" + r.Host)
|
||||
require.NoError(t, err)
|
||||
selfURL = selfURL.JoinPath(r.URL.String())
|
||||
result := api.InvenioRecordResponse{
|
||||
Links: api.InvenioRecordResponseLinks{
|
||||
Self: selfURL.String(),
|
||||
},
|
||||
}
|
||||
resultBytes, err := json.Marshal(result)
|
||||
require.NoError(t, err)
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
_, err = w.Write(resultBytes)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
// Handle requests for listing files in a record
|
||||
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Return the most basic response
|
||||
filesBaseURL, err := url.Parse("http://" + r.Host)
|
||||
require.NoError(t, err)
|
||||
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
|
||||
|
||||
entries := []api.InvenioFilesResponseEntry{}
|
||||
for filename, contents := range files {
|
||||
entries = append(entries,
|
||||
api.InvenioFilesResponseEntry{
|
||||
Key: filename,
|
||||
Checksum: md5Sum(contents),
|
||||
Size: int64(len(contents)),
|
||||
Updated: time.Now().UTC().Format(time.RFC3339),
|
||||
MimeType: "text/plain; charset=utf-8",
|
||||
Links: api.InvenioFilesResponseEntryLinks{
|
||||
Content: filesBaseURL.JoinPath(filename).String(),
|
||||
},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
result := api.InvenioFilesResponse{
|
||||
Entries: entries,
|
||||
}
|
||||
resultBytes, err := json.Marshal(result)
|
||||
require.NoError(t, err)
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
_, err = w.Write(resultBytes)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
// Handle requests for file contents
|
||||
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
|
||||
// Check that we are returning the contents of a file
|
||||
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
|
||||
assert.NotEmpty(t, filename)
|
||||
contents, found := files[filename]
|
||||
if !found {
|
||||
w.WriteHeader(404)
|
||||
return
|
||||
}
|
||||
|
||||
// Return the most basic response
|
||||
_, err := w.Write([]byte(contents))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
// Make the test server
|
||||
ts := httptest.NewServer(mux)
|
||||
|
||||
// Close the server at the end of the test
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
return ts
|
||||
}
|
||||
|
||||
func TestZenodoRemote(t *testing.T) {
|
||||
recordID := "2600782"
|
||||
doi := "10.5281/zenodo.2600782"
|
||||
|
||||
// The files in the dataset
|
||||
files := map[string]string{
|
||||
"README.md": "This is a dataset.",
|
||||
"data.txt": "Some data",
|
||||
}
|
||||
|
||||
ts := prepareMockZenodoServer(t, files)
|
||||
resolvedURL := ts.URL + "/record/" + recordID
|
||||
|
||||
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
|
||||
|
||||
testConfig := configmap.Simple{
|
||||
"type": "doi",
|
||||
"doi": doi,
|
||||
"provider": "zenodo",
|
||||
"doi_resolver_api_url": doiResolverAPIURL,
|
||||
}
|
||||
f, err := NewFs(context.Background(), remoteName, "", testConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test listing the DOI files
|
||||
entries, err := f.List(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Sort(entries)
|
||||
|
||||
require.Equal(t, len(files), len(entries))
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "README.md", e.Remote())
|
||||
assert.Equal(t, int64(18), e.Size())
|
||||
_, ok := e.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
e = entries[1]
|
||||
assert.Equal(t, "data.txt", e.Remote())
|
||||
assert.Equal(t, int64(9), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
// Test reading the DOI files
|
||||
o, err := f.NewObject(context.Background(), "README.md")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(18), o.Size())
|
||||
md5Hash, err := o.Hash(context.Background(), hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
|
||||
fd, err := o.Open(context.Background())
|
||||
require.NoError(t, err)
|
||||
data, err := io.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, []byte(files["README.md"]), data)
|
||||
do, ok := o.(fs.MimeTyper)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
|
||||
|
||||
o, err = f.NewObject(context.Background(), "data.txt")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(9), o.Size())
|
||||
md5Hash, err = o.Hash(context.Background(), hash.MD5)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
|
||||
fd, err = o.Open(context.Background())
|
||||
require.NoError(t, err)
|
||||
data, err = io.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, []byte(files["data.txt"]), data)
|
||||
do, ok = o.(fs.MimeTyper)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
|
||||
}
|
||||
backend/doi/doi_test.go (new file, 16 lines)
@@ -0,0 +1,16 @@
// Test DOI filesystem interface
package doi

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDoi:",
		NilObject:  (*Object)(nil),
	})
}
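These fstests-driven integration tests follow the usual rclone convention: they are typically run from the backend directory with "go test -v -remote TestDoi:", assuming a remote named TestDoi has been configured for the DOI backend.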
backend/doi/invenio.go (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
// Implementation for InvenioRDM
|
||||
|
||||
package doi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/doi/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)
|
||||
|
||||
// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
|
||||
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
|
||||
_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
|
||||
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
|
||||
var res *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: resolvedURL.String(),
|
||||
}
|
||||
err = pacer.Call(func() (bool, error) {
|
||||
res, err = srv.Call(ctx, &opts)
|
||||
return shouldRetry(ctx, res, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
// First, attempt to grab the API URL from the headers
|
||||
var linksetURL *url.URL
|
||||
links := parseLinkHeader(res.Header.Get("Link"))
|
||||
for _, link := range links {
|
||||
if link.Rel == "linkset" && link.Type == "application/linkset+json" {
|
||||
parsed, err := url.Parse(link.Href)
|
||||
if err == nil {
|
||||
linksetURL = parsed
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if linksetURL != nil {
|
||||
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
|
||||
if err == nil {
|
||||
return Invenio, endpoint, nil
|
||||
}
|
||||
fs.Logf(nil, "using linkset URL failed: %s", err.Error())
|
||||
}
|
||||
|
||||
// If there is no linkset header, try to grab the record ID from the URL
|
||||
recordID := ""
|
||||
resURL := res.Request.URL
|
||||
match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
|
||||
if match != nil {
|
||||
recordID = match[1]
|
||||
guessedURL := res.Request.URL.ResolveReference(&url.URL{
|
||||
Path: "/api/records/" + recordID,
|
||||
})
|
||||
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
|
||||
if err == nil {
|
||||
return Invenio, endpoint, nil
|
||||
}
|
||||
fs.Logf(nil, "guessing the URL failed: %s", err.Error())
|
||||
}
|
||||
|
||||
return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
|
||||
}
|
||||
|
||||
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
|
||||
var result api.InvenioRecordResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: resolvedURL.String(),
|
||||
}
|
||||
err = pacer.Call(func() (bool, error) {
|
||||
res, err := srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, res, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Links.Self == "" {
|
||||
return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
|
||||
}
|
||||
return url.Parse(result.Links.Self)
|
||||
}
|
||||
|
||||
// invenioProvider implements the doiProvider interface for InvenioRDM installations
|
||||
type invenioProvider struct {
|
||||
f *Fs
|
||||
}
|
||||
|
||||
// ListEntries returns the full list of entries found at the remote, regardless of root
|
||||
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
|
||||
// Use the cache if populated
|
||||
cachedEntries, found := ip.f.cache.GetMaybe("files")
|
||||
if found {
|
||||
parsedEntries, ok := cachedEntries.([]Object)
|
||||
if ok {
|
||||
for _, entry := range parsedEntries {
|
||||
newEntry := entry
|
||||
entries = append(entries, &newEntry)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
}
|
||||
|
||||
filesURL := ip.f.endpoint.JoinPath("files")
|
||||
var result api.InvenioFilesResponse
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
|
||||
}
|
||||
err = ip.f.pacer.Call(func() (bool, error) {
|
||||
res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, res, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("readDir failed: %w", err)
|
||||
}
|
||||
for _, file := range result.Entries {
|
||||
modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
|
||||
if modTimeErr != nil {
|
||||
fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
|
||||
modTime = timeUnset
|
||||
}
|
||||
entry := &Object{
|
||||
fs: ip.f,
|
||||
remote: file.Key,
|
||||
contentURL: file.Links.Content,
|
||||
size: file.Size,
|
||||
modTime: modTime,
|
||||
contentType: file.MimeType,
|
||||
md5: strings.TrimPrefix(file.Checksum, "md5:"),
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
// Populate the cache
|
||||
cacheEntries := []Object{}
|
||||
for _, entry := range entries {
|
||||
cacheEntries = append(cacheEntries, *entry)
|
||||
}
|
||||
ip.f.cache.Put("files", cacheEntries)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func newInvenioProvider(f *Fs) doiProvider {
|
||||
return &invenioProvider{
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
backend/doi/link_header.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package doi

import (
	"regexp"
	"strings"
)

var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)

// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
	Href   string
	Rel    string
	Type   string
	Extras map[string]string
}

func parseLinkHeader(header string) (links []headerLink) {
	for _, link := range strings.Split(header, ",") {
		link = strings.TrimSpace(link)
		parsed := parseLink(link)
		if parsed != nil {
			links = append(links, *parsed)
		}
	}
	return links
}

func parseLink(link string) (parsedLink *headerLink) {
	var parts []string
	for _, part := range strings.Split(link, ";") {
		parts = append(parts, strings.TrimSpace(part))
	}

	match := linkRegex.FindStringSubmatch(parts[0])
	if match == nil {
		return nil
	}

	result := &headerLink{
		Href:   match[1],
		Extras: map[string]string{},
	}

	for _, keyValue := range parts[1:] {
		parsed := parseKeyValue(keyValue)
		if parsed != nil {
			key, value := parsed[0], parsed[1]
			switch strings.ToLower(key) {
			case "rel":
				result.Rel = value
			case "type":
				result.Type = value
			default:
				result.Extras[key] = value
			}
		}
	}
	return result
}

func parseKeyValue(keyValue string) []string {
	parts := strings.SplitN(keyValue, "=", 2)
	if parts[0] == "" || len(parts) < 2 {
		return nil
	}
	match := valueRegex.FindStringSubmatch(parts[1])
	if match != nil {
		parts[1] = match[1]
		return parts
	}
	return parts
}
backend/doi/link_header_internal_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package doi

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestParseLinkHeader(t *testing.T) {
	header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
	links := parseLinkHeader(header)
	expected := headerLink{
		Href:   "https://zenodo.org/api/records/15063252",
		Rel:    "linkset",
		Type:   "application/linkset+json",
		Extras: map[string]string{},
	}
	assert.Contains(t, links, expected)

	header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
	links = parseLinkHeader(header)
	expectedList := []headerLink{{
		Href:   "https://api.example.com/issues?page=2",
		Rel:    "prev",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=4",
		Rel:    "next",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=10",
		Rel:    "last",
		Type:   "",
		Extras: map[string]string{},
	}, {
		Href:   "https://api.example.com/issues?page=1",
		Rel:    "first",
		Type:   "",
		Extras: map[string]string{},
	}}
	assert.Equal(t, links, expectedList)
}
backend/doi/zenodo.go (new file, 47 lines)
@@ -0,0 +1,47 @@
// Implementation for Zenodo

package doi

import (
	"context"
	"fmt"
	"net/url"
	"regexp"

	"github.com/rclone/rclone/backend/doi/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)

// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
	match := zenodoRecordRegex.FindStringSubmatch(doi)
	if match == nil {
		return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
	}

	recordID := match[1]
	endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})

	var result api.InvenioRecordResponse
	opts := rest.Opts{
		Method:  "GET",
		RootURL: endpointURL.String(),
	}
	err = pacer.Call(func() (bool, error) {
		res, err := srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, res, err)
	})
	if err != nil {
		return "", nil, err
	}

	endpointURL, err = url.Parse(result.Links.Self)
	if err != nil {
		return "", nil, err
	}

	return Zenodo, endpointURL, nil
}
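For a DOI such as 10.5281/zenodo.2600782 (the value used in the mock-server test above), zenodoRecordRegex captures everything after "zenodo.", so the record ID is 2600782 and the endpoint is guessed as /api/records/2600782 before being confirmed via result.Links.Self. A minimal standalone sketch of just the regex step (hypothetical snippet, not part of the backend):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as zenodoRecordRegex above.
	zenodoRecordRegex := regexp.MustCompile(`zenodo[.](.+)`)
	match := zenodoRecordRegex.FindStringSubmatch("10.5281/zenodo.2600782")
	if match != nil {
		fmt.Println("record ID:", match[1]) // record ID: 2600782
	}
}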
backend/filelu/api/types.go (new file, 81 lines)
@@ -0,0 +1,81 @@
// Package api defines types for interacting with the FileLu API.
package api

import "encoding/json"

// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
	Result struct {
		FldID interface{} `json:"fld_id"`
	} `json:"result"`
}

// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
}

// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
	Result struct {
		Files []struct {
			Name     string      `json:"name"`
			FldID    json.Number `json:"fld_id"`
			Path     string      `json:"path"`
			FileCode string      `json:"file_code"`
			Size     int64       `json:"size"`
		} `json:"files"`
		Folders []struct {
			Name  string      `json:"name"`
			FldID json.Number `json:"fld_id"`
			Path  string      `json:"path"`
		} `json:"folders"`
	} `json:"result"`
}

// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
	Result struct {
		URL  string `json:"url"`
		Size int64  `json:"size"`
	} `json:"result"`
}

// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
	Result []struct {
		Size     string `json:"size"`
		Name     string `json:"name"`
		FileCode string `json:"filecode"`
		Hash     string `json:"hash"`
		Status   int    `json:"status"`
	} `json:"result"`
}

// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
}

// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
	Status int    `json:"status"` // HTTP status code of the response.
	Msg    string `json:"msg"`    // Message describing the response.
	Result struct {
		PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
		Email         string `json:"email"`          // User's email address.
		UType         string `json:"utype"`          // User type (e.g., premium or free).
		Storage       string `json:"storage"`        // Total storage available to the user.
		StorageUsed   string `json:"storage_used"`   // Amount of storage used.
	} `json:"result"` // Nested result structure containing account details.
}
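A small sketch of how these types are meant to be consumed: decoding a folder listing and reading the json.Number folder IDs without forcing them through float64. The payload below is hypothetical, not a captured FileLu response.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rclone/rclone/backend/filelu/api"
)

func main() {
	// Hypothetical response body shaped like FolderListResponse.
	payload := `{"status":200,"msg":"OK","result":{
		"files":[{"name":"data.txt","fld_id":12,"path":"/docs","file_code":"abc123def456","size":9}],
		"folders":[{"name":"docs","fld_id":12,"path":"/docs"}]}}`
	var resp api.FolderListResponse
	if err := json.Unmarshal([]byte(payload), &resp); err != nil {
		panic(err)
	}
	// FldID stays a json.Number, so it can be printed or compared as a string.
	fmt.Println(resp.Result.Files[0].Name, resp.Result.Folders[0].FldID.String())
}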
backend/filelu/filelu.go (new file, 366 lines)
@@ -0,0 +1,366 @@
|
||||
// Package filelu provides an interface to the FileLu storage system.
|
||||
package filelu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// Register the backend with Rclone
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "filelu",
|
||||
Description: "FileLu Cloud Storage",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "key",
|
||||
Help: "Your FileLu Rclone key from My Account",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
|
||||
encoder.EncodeSlash |
|
||||
encoder.EncodeLtGt |
|
||||
encoder.EncodeExclamation |
|
||||
encoder.EncodeDoubleQuote |
|
||||
encoder.EncodeSingleQuote |
|
||||
encoder.EncodeBackQuote |
|
||||
encoder.EncodeQuestion |
|
||||
encoder.EncodeDollar |
|
||||
encoder.EncodeColon |
|
||||
encoder.EncodeAsterisk |
|
||||
encoder.EncodePipe |
|
||||
encoder.EncodeHash |
|
||||
encoder.EncodePercent |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeCrLf |
|
||||
encoder.EncodeDel |
|
||||
encoder.EncodeCtl |
|
||||
encoder.EncodeLeftSpace |
|
||||
encoder.EncodeLeftPeriod |
|
||||
encoder.EncodeLeftTilde |
|
||||
encoder.EncodeLeftCrLfHtVt |
|
||||
encoder.EncodeRightPeriod |
|
||||
encoder.EncodeRightCrLfHtVt |
|
||||
encoder.EncodeSquareBracket |
|
||||
encoder.EncodeSemicolon |
|
||||
encoder.EncodeRightSpace |
|
||||
encoder.EncodeInvalidUtf8 |
|
||||
encoder.EncodeDot),
|
||||
},
|
||||
}})
|
||||
}
|
||||
|
||||
// Options defines the configuration for the FileLu backend
|
||||
type Options struct {
|
||||
Key string `config:"key"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
// Fs represents the FileLu file system
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
opt Options
|
||||
features *fs.Features
|
||||
endpoint string
|
||||
pacer *pacer.Pacer
|
||||
srv *rest.Client
|
||||
client *http.Client
|
||||
targetFile string
|
||||
}
|
||||
|
||||
// NewFs creates a new Fs object for FileLu
|
||||
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse config: %w", err)
|
||||
}
|
||||
|
||||
if opt.Key == "" {
|
||||
return nil, fmt.Errorf("FileLu Rclone Key is required")
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
|
||||
if strings.TrimSpace(root) == "" {
|
||||
root = ""
|
||||
}
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
filename := ""
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
opt: *opt,
|
||||
endpoint: "https://filelu.com/rclone",
|
||||
client: client,
|
||||
srv: rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
|
||||
pacer: pacer.New(),
|
||||
targetFile: filename,
|
||||
root: root,
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
WriteMetadata: false,
|
||||
SlowHash: true,
|
||||
}).Fill(ctx, f)
|
||||
|
||||
rootContainer, rootDirectory := rootSplit(f.root)
|
||||
if rootContainer != "" && rootDirectory != "" {
|
||||
// Check to see if the (container,directory) is actually an existing file
|
||||
oldRoot := f.root
|
||||
newRoot, leaf := path.Split(oldRoot)
|
||||
f.root = strings.Trim(newRoot, "/")
|
||||
_, err := f.NewObject(ctx, leaf)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
|
||||
// File doesn't exist or is a directory so return old f
|
||||
f.root = strings.Trim(oldRoot, "/")
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Mkdir to create directory on remote server.
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
fullPath := path.Clean(f.root + "/" + dir)
|
||||
_, err := f.createFolder(ctx, fullPath)
|
||||
return err
|
||||
}
|
||||
|
||||
// About provides usage statistics for the remote
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
accountInfo, err := f.getAccountInfo(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse total storage: %w", err)
|
||||
}
|
||||
|
||||
usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse used storage: %w", err)
|
||||
}
|
||||
|
||||
return &fs.Usage{
|
||||
Total: fs.NewUsageValue(totalStorage), // Total bytes available
|
||||
Used: fs.NewUsageValue(usedStorage), // Total bytes used
|
||||
Free: fs.NewUsageValue(totalStorage - usedStorage),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Purge deletes the directory and all its contents
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
fullPath := path.Join(f.root, dir)
|
||||
if fullPath != "" {
|
||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
||||
}
|
||||
return f.deleteFolder(ctx, fullPath)
|
||||
}
|
||||
|
||||
// List returns a list of files and folders for the given directory
|
||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
// Compose full path for API call
|
||||
fullPath := path.Join(f.root, dir)
|
||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
||||
if fullPath == "/" {
|
||||
fullPath = ""
|
||||
}
|
||||
|
||||
var entries fs.DirEntries
|
||||
result, err := f.getFolderList(ctx, fullPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fldMap := map[string]bool{}
|
||||
for _, folder := range result.Result.Folders {
|
||||
fldMap[folder.FldID.String()] = true
|
||||
if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
|
||||
continue
|
||||
}
|
||||
|
||||
paths := strings.Split(folder.Path, fullPath+"/")
|
||||
remote := paths[0]
|
||||
if len(paths) > 1 {
|
||||
remote = paths[1]
|
||||
}
|
||||
|
||||
if strings.Contains(remote, "/") {
|
||||
continue
|
||||
}
|
||||
|
||||
pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
|
||||
remotePathWithoutRoot := pathsWithoutRoot[0]
|
||||
if len(pathsWithoutRoot) > 1 {
|
||||
remotePathWithoutRoot = pathsWithoutRoot[1]
|
||||
}
|
||||
remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
|
||||
entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
|
||||
}
|
||||
for _, file := range result.Result.Files {
|
||||
if _, ok := fldMap[file.FldID.String()]; ok {
|
||||
continue
|
||||
}
|
||||
remote := path.Join(dir, file.Name)
|
||||
// trim leading slashes
|
||||
remote = strings.TrimPrefix(remote, "/")
|
||||
obj := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: file.Size,
|
||||
modTime: time.Now(),
|
||||
}
|
||||
entries = append(entries, obj)
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Put uploads a file directly to the destination folder in the FileLu storage system.
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
if src.Size() == 0 {
|
||||
return nil, fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
|
||||
err := f.uploadFile(ctx, in, src.Remote())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newObject := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
size: src.Size(),
|
||||
modTime: src.ModTime(ctx),
|
||||
}
|
||||
fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
|
||||
return newObject, nil
|
||||
}
|
||||
|
||||
// Move moves the file to the specified location
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {
|
||||
|
||||
if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
|
||||
dir := path.Dir(destinationPath)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create destination directory: %w", err)
|
||||
}
|
||||
|
||||
reader, err := src.Open(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := reader.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
dest, err := os.Create(destinationPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create destination file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err := io.Copy(dest, reader); err != nil {
|
||||
return nil, fmt.Errorf("failed to copy file content: %w", err)
|
||||
}
|
||||
|
||||
if err := src.Remove(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to remove source file: %w", err)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
reader, err := src.Open(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open source object: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := reader.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close file body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = f.uploadFile(ctx, reader, destinationPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to upload file to destination: %w", err)
|
||||
}
|
||||
|
||||
if err := src.Remove(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to delete source file: %w", err)
|
||||
}
|
||||
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: destinationPath,
|
||||
size: src.Size(),
|
||||
modTime: src.ModTime(ctx),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Rmdir removes a directory
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
fullPath := path.Join(f.root, dir)
|
||||
if fullPath != "" {
|
||||
fullPath = "/" + strings.Trim(fullPath, "/")
|
||||
}
|
||||
|
||||
// Step 1: Check if folder is empty
|
||||
listResp, err := f.getFolderList(ctx, fullPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
|
||||
return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
|
||||
}
|
||||
|
||||
// Step 2: Delete the folder
|
||||
return f.deleteFolder(ctx, fullPath)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
backend/filelu/filelu_client.go (new file, 324 lines)
@@ -0,0 +1,324 @@
|
||||
package filelu
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/backend/filelu/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
)
|
||||
|
||||
// createFolder creates a folder at the specified path.
|
||||
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
|
||||
encodedDir := f.fromStandardPath(dirPath)
|
||||
apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(encodedDir),
|
||||
url.QueryEscape(f.opt.Key), // assuming f.opt.Key is the correct field
|
||||
)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
result := api.CreateFolderResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
var innerErr error
|
||||
resp, innerErr = f.client.Do(req)
|
||||
return fserrors.ShouldRetry(innerErr), innerErr
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = json.NewDecoder(resp.Body).Decode(&result)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
if result.Status != 200 {
|
||||
return nil, fmt.Errorf("error: %s", result.Msg)
|
||||
}
|
||||
|
||||
fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// getFolderList List both files and folders in a directory.
|
||||
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
|
||||
encodedDir := f.fromStandardPath(path)
|
||||
apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(encodedDir),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
var body []byte
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error reading response body: %w", err)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var response api.FolderListResponse
|
||||
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
if response.Status != 200 {
|
||||
if strings.Contains(response.Msg, "Folder not found") {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
return nil, fmt.Errorf("API error: %s", response.Msg)
|
||||
}
|
||||
|
||||
for index := range response.Result.Folders {
|
||||
response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
|
||||
}
|
||||
|
||||
for index := range response.Result.Files {
|
||||
response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
|
||||
}
|
||||
|
||||
return &response, nil
|
||||
|
||||
}
|
||||
|
||||
// deleteFolder deletes a folder at the specified path.
|
||||
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
|
||||
fullPath = f.fromStandardPath(fullPath)
|
||||
deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(fullPath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
delResp := api.DeleteFolderResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &delResp); err != nil {
|
||||
return false, fmt.Errorf("error decoding delete response: %w", err)
|
||||
}
|
||||
if delResp.Status != 200 {
|
||||
return false, fmt.Errorf("delete error: %s", delResp.Msg)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getDirectLink of file from FileLu to download.
|
||||
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
|
||||
filePath = f.fromStandardPath(filePath)
|
||||
apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(filePath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
result := api.FileDirectLinkResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
return result.Result.URL, result.Result.Size, nil
|
||||
}
|
||||
|
||||
// deleteFile deletes a file based on filePath
|
||||
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
|
||||
filePath = f.fromStandardPath(filePath)
|
||||
apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
|
||||
f.endpoint,
|
||||
url.QueryEscape(filePath),
|
||||
url.QueryEscape(f.opt.Key),
|
||||
)
|
||||
|
||||
result := api.DeleteFileResponse{}
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// getAccountInfo retrieves account information
|
||||
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/account/info",
|
||||
Parameters: url.Values{
|
||||
"key": {f.opt.Key},
|
||||
},
|
||||
}
|
||||
|
||||
var result api.AccountInfoResponse
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
_, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return fserrors.ShouldRetry(callErr), callErr
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return nil, fmt.Errorf("error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// getFileInfo retrieves file information based on file code
|
||||
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
|
||||
u, _ := url.Parse(f.endpoint + "/file/info2")
|
||||
q := u.Query()
|
||||
q.Set("file_code", fileCode) // raw path — Go handles escaping properly here
|
||||
q.Set("key", f.opt.Key)
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
apiURL := f.endpoint + "/file/info2?" + u.RawQuery
|
||||
|
||||
var body []byte
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
body, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error reading response body: %w", err)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := api.FileInfoResponse{}
|
||||
|
||||
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
|
||||
return nil, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 || len(result.Result) == 0 {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
backend/filelu/filelu_file_uploader.go (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
package filelu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// uploadFile uploads a file to FileLu
|
||||
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
|
||||
directory := path.Dir(fileFullPath)
|
||||
fileName := path.Base(fileFullPath)
|
||||
if directory == "." {
|
||||
directory = ""
|
||||
}
|
||||
destinationFolderPath := path.Join(f.root, directory)
|
||||
if destinationFolderPath != "" {
|
||||
destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
|
||||
}
|
||||
|
||||
existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrorDirNotFound) {
|
||||
err = f.Mkdir(ctx, path.Dir(fileFullPath))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create directory: %w", err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("failed to list existing files: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, entry := range existingEntries {
|
||||
if entry.Remote() == fileFullPath {
|
||||
_, ok := entry.(fs.Object)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// If the file exists but is different, remove it
|
||||
filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
|
||||
err = f.deleteFile(ctx, filePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete existing file: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
uploadURL, sessID, err := f.getUploadServer(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retrieve upload server: %w", err)
|
||||
}
|
||||
|
||||
// Since the fileCode isn't used, just handle the error
|
||||
if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
|
||||
return fmt.Errorf("failed to upload file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getUploadServer gets the upload server URL with proper key authentication
|
||||
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
|
||||
apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))
|
||||
|
||||
var result struct {
|
||||
Status int `json:"status"`
|
||||
SessID string `json:"sess_id"`
|
||||
Result string `json:"result"`
|
||||
Msg string `json:"msg"`
|
||||
}
|
||||
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, fmt.Errorf("error decoding response: %w", err)
|
||||
}
|
||||
|
||||
if result.Status != 200 {
|
||||
return false, fmt.Errorf("API error: %s", result.Msg)
|
||||
}
|
||||
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return result.Result, result.SessID, nil
|
||||
}
|
||||
|
||||
// uploadFileWithDestination uploads a file directly to a specified folder using file content reader.
|
||||
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
|
||||
destinationPath := f.fromStandardPath(dirPath)
|
||||
encodedFileName := f.fromStandardPath(fileName)
|
||||
pr, pw := io.Pipe()
|
||||
writer := multipart.NewWriter(pw)
|
||||
isDeletionRequired := false
|
||||
go func() {
|
||||
defer func() {
|
||||
if err := pw.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close: %v", err)
|
||||
}
|
||||
}()
|
||||
_ = writer.WriteField("sess_id", sessID)
|
||||
_ = writer.WriteField("utype", "prem")
|
||||
_ = writer.WriteField("fld_path", destinationPath)
|
||||
|
||||
part, err := writer.CreateFormFile("file_0", encodedFileName)
|
||||
if err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
if _, err := io.Copy(part, fileContent); err != nil {
|
||||
isDeletionRequired = true
|
||||
pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := writer.Close(); err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
|
||||
}
|
||||
}()
|
||||
|
||||
var fileCode string
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create upload request: %w", err)
|
||||
}
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
|
||||
}
|
||||
defer respBodyClose(resp.Body)
|
||||
|
||||
var result []struct {
|
||||
FileCode string `json:"file_code"`
|
||||
FileStatus string `json:"file_status"`
|
||||
}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, fmt.Errorf("failed to parse upload response: %w", err)
|
||||
}
|
||||
|
||||
if len(result) == 0 || result[0].FileStatus != "OK" {
|
||||
return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
|
||||
}
|
||||
|
||||
fileCode = result[0].FileCode
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
|
||||
if err != nil && isDeletionRequired {
|
||||
// Attempt to delete the file if upload fails
|
||||
_ = f.deleteFile(ctx, destinationPath+"/"+fileName)
|
||||
}
|
||||
|
||||
return fileCode, err
|
||||
}
|
||||
|
||||
// respBodyClose to check body response.
|
||||
func respBodyClose(responseBody io.Closer) {
|
||||
if cerr := responseBody.Close(); cerr != nil {
|
||||
fmt.Printf("Error closing response body: %v\n", cerr)
|
||||
}
|
||||
}
|
||||
backend/filelu/filelu_helper.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package filelu

import (
	"context"
	"errors"
	"fmt"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
)

// errFileNotFound represents a file-not-found error
var errFileNotFound error = errors.New("file not found")

// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
	// Prepare parent directory
	parentDir := path.Dir(filePath)

	// Call List to get all the files
	result, err := f.getFolderList(ctx, parentDir)
	if err != nil {
		return "", err
	}

	for _, file := range result.Result.Files {
		filePathFromServer := parentDir + "/" + file.Name
		if parentDir == "/" {
			filePathFromServer = "/" + file.Name
		}
		if filePath == filePathFromServer {
			return file.FileCode, nil
		}
	}

	return "", errFileNotFound
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

func (f *Fs) fromStandardPath(remote string) string {
	return f.opt.Enc.FromStandardPath(remote)
}

func (f *Fs) toStandardPath(remote string) string {
	return f.opt.Enc.ToStandardPath(remote)
}

// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet() // Properly creates an empty hash set
}

// Name returns the remote name
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root path
func (f *Fs) Root() string {
	return f.root
}

// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

func (f *Fs) String() string {
	return fmt.Sprintf("FileLu root '%s'", f.root)
}

// isFileCode checks if a string looks like a file code
func isFileCode(s string) bool {
	if len(s) != 12 {
		return false
	}
	for _, c := range s {
		if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
			return false
		}
	}
	return true
}

func shouldRetry(err error) bool {
	return fserrors.ShouldRetry(err)
}

func shouldRetryHTTP(code int) bool {
	return code == 429 || code >= 500
}

func rootSplit(absPath string) (bucket, bucketPath string) {
	// No bucket
	if absPath == "" {
		return "", ""
	}
	slash := strings.IndexRune(absPath, '/')
	// Bucket but no path
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}
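Worked examples of rootSplit, which treats the first path segment as the bucket: rootSplit("") returns ("", ""), rootSplit("docs") returns ("docs", ""), and rootSplit("docs/a/b.txt") returns ("docs", "a/b.txt"); shouldRetryHTTP then limits retries to 429 and 5xx responses.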
backend/filelu/filelu_object.go (new file, 259 lines)
@@ -0,0 +1,259 @@
|
||||
package filelu
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
// Object describes a FileLu object
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
// NewObject creates a new Object for the given remote path
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
var filePath string
|
||||
filePath = path.Join(f.root, remote)
|
||||
filePath = "/" + strings.Trim(filePath, "/")
|
||||
|
||||
// Get File code
|
||||
fileCode, err := f.getFileCode(ctx, filePath)
|
||||
if err != nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
// Get File info
|
||||
fileInfos, err := f.getFileInfo(ctx, fileCode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get file info: %w", err)
|
||||
}
|
||||
|
||||
fileInfo := fileInfos.Result[0]
|
||||
size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)
|
||||
|
||||
returnedRemote := remote
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: returnedRemote,
|
||||
size: size,
|
||||
modTime: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Open opens the object for reading
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
filePath := path.Join(o.fs.root, o.remote)
|
||||
// Get direct link
|
||||
directLink, size, err := o.fs.getDirectLink(ctx, filePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get direct link: %w", err)
|
||||
}
|
||||
|
||||
o.size = size
|
||||
|
||||
// Offset and Count for range download
|
||||
var offset int64
|
||||
var count int64
|
||||
fs.FixRangeOption(options, o.size)
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.RangeOption:
|
||||
offset, count = x.Decode(o.size)
|
||||
if count < 0 {
|
||||
count = o.size - offset
|
||||
}
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
count = o.size
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var reader io.ReadCloser
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to create download request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := o.fs.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Wrap the response body to handle offset and count
|
||||
currentContents, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to read response body: %w", err)
|
||||
}
|
||||
|
||||
if offset > 0 {
|
||||
if offset > int64(len(currentContents)) {
|
||||
return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
|
||||
}
|
||||
currentContents = currentContents[offset:]
|
||||
}
|
||||
if count > 0 && count < int64(len(currentContents)) {
|
||||
currentContents = currentContents[:count]
|
||||
}
|
||||
reader = io.NopCloser(bytes.NewReader(currentContents))
|
||||
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
// Update updates the object with new data
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
if src.Size() <= 0 {
|
||||
return fs.ErrorCantUploadEmptyFiles
|
||||
}
|
||||
|
||||
err := o.fs.uploadFile(ctx, in, o.remote)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upload file: %w", err)
|
||||
}
|
||||
o.size = src.Size()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove deletes the object from FileLu
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")
|
||||
|
||||
err := o.fs.deleteFile(ctx, fullPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hash returns the MD5 hash of an object
|
||||
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
var fileCode string
|
||||
if isFileCode(o.fs.root) {
|
||||
fileCode = o.fs.root
|
||||
} else {
|
||||
matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
|
||||
for _, match := range matches {
|
||||
if len(match) > 1 && len(match[1]) == 12 {
|
||||
fileCode = match[1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if fileCode == "" {
|
||||
return "", fmt.Errorf("no valid file code found in the remote path")
|
||||
}
|
||||
|
||||
apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
|
||||
o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))
|
||||
|
||||
var result struct {
|
||||
Status int `json:"status"`
|
||||
Msg string `json:"msg"`
|
||||
Result []struct {
|
||||
Hash string `json:"hash"`
|
||||
} `json:"result"`
|
||||
}
|
||||
err := o.fs.pacer.Call(func() (bool, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err := o.fs.client.Do(req)
|
||||
if err != nil {
|
||||
return shouldRetry(err), err
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
fs.Logf(nil, "Failed to close response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return shouldRetryHTTP(resp.StatusCode), nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if result.Status != 200 || len(result.Result) == 0 {
|
||||
return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
|
||||
}
|
||||
|
||||
return result.Result[0].Hash, nil
|
||||
}
|
||||
|
||||
// String returns a string representation of the object
|
||||
func (o *Object) String() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of the object
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return fs.ErrorCantSetModTime
|
||||
}
|
||||
|
||||
// Storable indicates whether the object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
backend/filelu/filelu_test.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package filelu_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFileLu:",
		NilObject:       nil,
		SkipInvalidUTF8: true,
	})
}
backend/filelu/utils.go (new file, 15 lines)
@@ -0,0 +1,15 @@
package filelu

import (
	"fmt"
)

// parseStorageToBytes converts a storage string expressed in GB (e.g., "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
	var gb float64
	_, err := fmt.Sscanf(storage, "%f", &gb)
	if err != nil {
		return 0, fmt.Errorf("failed to parse storage: %w", err)
	}
	return int64(gb * 1024 * 1024 * 1024), nil
}
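The storage figures from the account info endpoint are treated as GB here, so "10" maps to 10*1024*1024*1024 = 10737418240 bytes. A tiny standalone check of that conversion (hypothetical snippet, not part of the backend):

package main

import "fmt"

func main() {
	// Mirrors parseStorageToBytes: scan a float and scale by 1024^3.
	var gb float64
	if _, err := fmt.Sscanf("10", "%f", &gb); err == nil {
		fmt.Println(int64(gb * 1024 * 1024 * 1024)) // 10737418240
	}
}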
@@ -9,6 +9,7 @@ import (
	"io"
	"net"
	"net/textproto"
	"net/url"
	"path"
	"runtime"
	"strings"
@@ -185,6 +186,14 @@ Supports the format user:pass@host:port, user@host:port, host:port.

Example:

    myUser:myPass@localhost:9005
`,
			Advanced: true,
		}, {
			Name:    "http_proxy",
			Default: "",
			Help: `URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
			Advanced: true,
		}, {
@@ -248,6 +257,7 @@ type Options struct {
	AskPassword   bool                 `config:"ask_password"`
	Enc           encoder.MultiEncoder `config:"encoding"`
	SocksProxy    string               `config:"socks_proxy"`
	HTTPProxy     string               `config:"http_proxy"`
	NoCheckUpload bool                 `config:"no_check_upload"`
}

@@ -266,6 +276,7 @@ type Fs struct {
	pool     []*ftp.ServerConn
	drain    *time.Timer // used to drain the pool when we stop using the connections
	tokens   *pacer.TokenDispenser
	proxyURL *url.URL  // address of HTTP proxy read from environment
	pacer    *fs.Pacer // pacer for FTP connections
	fGetTime bool      // true if the ftp library accepts GetTime
	fSetTime bool      // true if the ftp library accepts SetTime
@@ -413,11 +424,26 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
	dial := func(network, address string) (conn net.Conn, err error) {
		fs.Debugf(f, "dial(%q,%q)", network, address)
		defer func() {
			fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
			if err != nil {
				fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err)
			} else {
				fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err)
			}
		}()
		baseDialer := fshttp.NewDialer(ctx)
		if f.opt.SocksProxy != "" {
			conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
		} else if f.proxyURL != nil {
			// We need to make the onward connection to f.opt.Host. However the FTP
			// library sets the host to the proxy IP after using EPSV or PASV so we need
			// to correct that here.
			var dialPort string
			_, dialPort, err = net.SplitHostPort(address)
			if err != nil {
				return nil, err
			}
			dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
			conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
		} else {
			conn, err = baseDialer.Dial(network, address)
		}
@@ -631,6 +657,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
		CanHaveEmptyDirectories: true,
		PartialUploads:          true,
	}).Fill(ctx, f)
	// get proxy URL if set
	if opt.HTTPProxy != "" {
		proxyURL, err := url.Parse(opt.HTTPProxy)
		if err != nil {
			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
		}
		f.proxyURL = proxyURL
	}
	// set the pool drainer timer going
	if f.opt.IdleTimeout > 0 {
		f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
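The new `http_proxy` option routes the FTP control and data connections through an HTTP proxy using the CONNECT verb; in the hunks above rclone's `proxy.HTTPConnectDial` performs the handshake. As a rough stdlib-only sketch of what such a handshake involves (illustration only, no proxy authentication, names are not from the rclone code):

```go
package proxysketch

import (
	"bufio"
	"fmt"
	"net"
	"net/http"
)

// connectViaProxy is an illustrative sketch of the HTTP CONNECT handshake an
// option like --ftp-http-proxy relies on: dial the proxy, ask it to open a
// tunnel to target ("host:port"), and return the tunnelled connection.
func connectViaProxy(proxyAddr, target string) (net.Conn, error) {
	conn, err := net.Dial("tcp", proxyAddr)
	if err != nil {
		return nil, err
	}
	// Ask the proxy to open a raw TCP tunnel to the target.
	fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", target, target)
	resp, err := http.ReadResponse(bufio.NewReader(conn), nil)
	if err != nil {
		conn.Close()
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		conn.Close()
		return nil, fmt.Errorf("proxy refused CONNECT: %s", resp.Status)
	}
	return conn, nil
}
```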
@@ -194,33 +194,9 @@ type DeleteResponse struct {
	Data map[string]Error
}

// Server is an upload server
type Server struct {
	Name string `json:"name"`
	Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}

// ServersResponse is the output from /servers
type ServersResponse struct {
	Error
	Data struct {
		Servers []Server `json:"servers"`
	} `json:"data"`
// DirectUploadURL returns the direct upload URL for Gofile
func DirectUploadURL() string {
	return "https://upload.gofile.io/uploadfile"
}

// UploadResponse is returned by POST /contents/uploadfile
@@ -8,13 +8,11 @@ import (
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/gofile/api"
@@ -37,10 +35,8 @@ const (
	maxSleep      = 20 * time.Second
	decayConstant = 1 // bigger for slower decay, exponential
	rootURL       = "https://api.gofile.io"
	serversExpiry  = 60 * time.Second // check for new upload servers this often
	serversActive  = 2                // choose this many closest upload servers to use
	rateLimitSleep = 5 * time.Second  // penalise a goroutine by this long for making a rate limit error
	maxDepth       = 4                // in ListR recursive list this deep (maximum is 16)
	rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error
	maxDepth       = 4               // in ListR recursive list this deep (maximum is 16)
)

/*
@@ -128,16 +124,13 @@ type Options struct {

// Fs represents a remote gofile
type Fs struct {
	name           string             // name of this remote
	root           string             // the path we are working on
	opt            Options            // parsed options
	features       *fs.Features       // optional features
	srv            *rest.Client       // the connection to the server
	dirCache       *dircache.DirCache // Map of directory path to directory id
	pacer          *fs.Pacer          // pacer for API calls
	serversMu      *sync.Mutex        // protect the servers info below
	servers        []api.Server       // upload servers we can use
	serversChecked time.Time          // time the servers were refreshed
	name     string             // name of this remote
	root     string             // the path we are working on
	opt      Options            // parsed options
	features *fs.Features       // optional features
	srv      *rest.Client       // the connection to the server
	dirCache *dircache.DirCache // Map of directory path to directory id
	pacer    *fs.Pacer          // pacer for API calls
}

// Object describes a gofile object
@@ -311,12 +304,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	client := fshttp.NewClient(ctx)

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		srv:       rest.NewClient(client).SetRoot(rootURL),
		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		serversMu: new(sync.Mutex),
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   rest.NewClient(client).SetRoot(rootURL),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{
		CaseInsensitive: false,
@@ -435,98 +427,6 @@ func (f *Fs) readRootFolderID(ctx context.Context, m configmap.Mapper) (err erro
	return nil
}

// Find the top n servers measured by response time
func (f *Fs) bestServers(ctx context.Context, servers []api.Server, n int) (newServers []api.Server) {
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(10*time.Second))
	defer cancel()

	if n > len(servers) {
		n = len(servers)
	}
	results := make(chan int, len(servers))

	// Test how long the servers take to respond
	for i := range servers {
		i := i // for closure
		go func() {
			opts := rest.Opts{
				Method:  "GET",
				RootURL: servers[i].Root(),
			}
			var result api.UploadServerStatus
			start := time.Now()
			_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
			ping := time.Since(start)
			err = result.Err(err)
			if err != nil {
				results <- -1 // send a -ve number on error
				return
			}
			fs.Debugf(nil, "Upload server %v responded in %v", &servers[i], ping)
			results <- i
		}()
	}

	// Wait for n servers to respond
	newServers = make([]api.Server, 0, n)
	for range servers {
		i := <-results
		if i >= 0 {
			newServers = append(newServers, servers[i])
		}
		if len(newServers) >= n {
			break
		}
	}
	return newServers
}

// Clear all the upload servers - call on an error
func (f *Fs) clearServers() {
	f.serversMu.Lock()
	defer f.serversMu.Unlock()

	fs.Debugf(f, "Clearing upload servers")
	f.servers = nil
}

// Gets an upload server
func (f *Fs) getServer(ctx context.Context) (server *api.Server, err error) {
	f.serversMu.Lock()
	defer f.serversMu.Unlock()

	if len(f.servers) == 0 || time.Since(f.serversChecked) >= serversExpiry {
		opts := rest.Opts{
			Method: "GET",
			Path:   "/servers",
		}
		var result api.ServersResponse
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(ctx, resp, err)
		})
		if err = result.Err(err); err != nil {
			if len(f.servers) == 0 {
				return nil, fmt.Errorf("failed to read upload servers: %w", err)
			}
			fs.Errorf(f, "failed to read new upload servers: %v", err)
		} else {
			// Find the top servers measured by response time
			f.servers = f.bestServers(ctx, result.Data.Servers, serversActive)
			f.serversChecked = time.Now()
		}
	}

	if len(f.servers) == 0 {
		return nil, errors.New("no upload servers found")
	}

	// Pick a server at random since we've already found the top ones
	i := rand.Intn(len(f.servers))
	return &f.servers[i], nil
}

// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
	if f.root == "" {
@@ -1526,13 +1426,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return err
	}

	// Find an upload server
	server, err := o.fs.getServer(ctx)
	if err != nil {
		return err
	}
	fs.Debugf(o, "Using upload server %v", server)

	// If the file exists, delete it after a successful upload
	if o.id != "" {
		id := o.id
@@ -1561,7 +1454,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		},
		MultipartContentName: "file",
		MultipartFileName:    o.fs.opt.Enc.FromStandardName(leaf),
		RootURL:              server.URL(),
		RootURL:              api.DirectUploadURL(),
		Options:              options,
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1569,10 +1462,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return shouldRetry(ctx, resp, err)
	})
	if err = result.Err(err); err != nil {
		if isAPIErr(err, "error-freespace") {
			fs.Errorf(o, "Upload server out of space - need to retry upload")
		}
		o.fs.clearServers()
		return fmt.Errorf("failed to upload file: %w", err)
	}
	return o.setMetaData(&result.Data)
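The net effect of the Gofile changes above is that uploads no longer pick a regional upload server but POST straight to the single direct upload endpoint. The stdlib-only sketch below shows roughly what such a multipart upload looks like; the token header and helper names are assumptions for illustration, not taken from the rclone code.

```go
package gofilesketch

import (
	"bytes"
	"io"
	"mime/multipart"
	"net/http"
)

// uploadDirect sketches a multipart POST of one file to a direct upload
// endpoint such as the one returned by api.DirectUploadURL(). The "file"
// field name mirrors the MultipartContentName used above; the auth header
// is an assumption for illustration only.
func uploadDirect(url, token, name string, contents io.Reader) (*http.Response, error) {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	part, err := w.CreateFormFile("file", name)
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(part, contents); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, url, &body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("Authorization", "Bearer "+token) // assumed auth scheme
	return http.DefaultClient.Do(req)
}
```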
@@ -483,6 +483,9 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
	if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
		bucketPath = bucketPath[:len(bucketPath)-1]
	}
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -712,7 +715,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
				continue
			}
			// process directory markers as directories
			remote = strings.TrimRight(remote, "/")
			remote, _ = strings.CutSuffix(remote, "/")
		}
		remote = remote[len(prefix):]
		if addBucket {
@@ -959,7 +962,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
	remote = strings.TrimRight(remote, "/")
	remote, _ = strings.CutSuffix(remote, "/")
	dir := path.Dir(remote)
	if dir == "/" || dir == "." {
		dir = ""
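The switch from strings.TrimRight to strings.CutSuffix matters for objects whose names end in more than one slash: TrimRight strips every trailing slash, while CutSuffix removes at most one, so a directory marker for an object literally named "dir/" keeps its slash. A quick stdlib-only illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "dir//" // e.g. a directory marker for an object named "dir/"

	// TrimRight removes *all* trailing slashes.
	fmt.Println(strings.TrimRight(name, "/")) // prints "dir"

	// CutSuffix removes at most one trailing slash, preserving the slash
	// that is part of the object name itself.
	trimmed, _ := strings.CutSuffix(name, "/")
	fmt.Println(trimmed) // prints "dir/"
}
```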
@@ -346,7 +346,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	baseClient := fshttp.NewClient(ctx)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
	if err != nil {
		return nil, fmt.Errorf("failed to configure Box: %w", err)
		return nil, fmt.Errorf("failed to configure google photos: %w", err)
	}

	root = strings.Trim(path.Clean(root), "/")
||||
@@ -617,16 +617,36 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
case 1:
|
||||
// upload file using link from first step
|
||||
var res *http.Response
|
||||
var location string
|
||||
|
||||
// Check to see if we are being redirected
|
||||
opts := &rest.Opts{
|
||||
Method: "HEAD",
|
||||
RootURL: getFirstStepResult.Data.SignURL,
|
||||
Options: options,
|
||||
NoRedirect: true,
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
res, err = o.fs.srv.Call(ctx, opts)
|
||||
return o.fs.shouldRetry(ctx, res, err)
|
||||
})
|
||||
if res != nil {
|
||||
location = res.Header.Get("Location")
|
||||
if location != "" {
|
||||
// set the URL to the new Location
|
||||
opts.RootURL = location
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("head upload URL: %w", err)
|
||||
}
|
||||
|
||||
file := io.MultiReader(bytes.NewReader(first10mBytes), in)
|
||||
|
||||
opts := &rest.Opts{
|
||||
Method: "PUT",
|
||||
RootURL: getFirstStepResult.Data.SignURL,
|
||||
Options: options,
|
||||
Body: file,
|
||||
ContentLength: &size,
|
||||
}
|
||||
opts.Method = "PUT"
|
||||
opts.Body = file
|
||||
opts.ContentLength = &size
|
||||
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
res, err = o.fs.srv.Call(ctx, opts)
|
||||
|
||||
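The hunk above probes the signed upload URL with a HEAD request (redirects disabled) and, if the server answers with a Location header, uploads to that URL instead. A stdlib-only sketch of the same probe, for illustration only (the function and parameter names are placeholders, not rclone API):

```go
package redirectprobe

import (
	"context"
	"net/http"
)

// resolveUploadURL issues a HEAD request without following redirects and
// returns the Location header if the server redirects, otherwise the
// original URL. signURL stands in for a presigned upload URL.
func resolveUploadURL(ctx context.Context, signURL string) (string, error) {
	client := &http.Client{
		// Never follow redirects automatically; we want the Location header.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, signURL, nil)
	if err != nil {
		return "", err
	}
	resp, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if loc := resp.Header.Get("Location"); loc != "" {
		return loc, nil
	}
	return signURL, nil
}
```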
@@ -1090,6 +1090,10 @@ func (o *Object) Remote() string {

// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	if r == hash.None {
		return "", nil
	}

	// Check that the underlying file hasn't changed
	o.fs.objectMetaMu.RLock()
	oldtime := o.modTime
@@ -1197,7 +1201,15 @@ func (o *Object) Storable() bool {
	o.fs.objectMetaMu.RLock()
	mode := o.mode
	o.fs.objectMetaMu.RUnlock()
	if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {

	// On Windows items with os.ModeIrregular are likely Junction
	// points so we treat them as symlinks for the purpose of ignoring them.
	// https://github.com/golang/go/issues/73827
	symlinkFlag := os.ModeSymlink
	if runtime.GOOS == "windows" {
		symlinkFlag |= os.ModeIrregular
	}
	if mode&symlinkFlag != 0 && !o.fs.opt.TranslateSymlinks {
		if !o.fs.opt.SkipSymlinks {
			fs.Logf(o, "Can't follow symlink without -L/--copy-links")
		}

@@ -204,6 +204,23 @@ func TestSymlinkError(t *testing.T) {
	assert.Equal(t, errLinksAndCopyLinks, err)
}

func TestHashWithTypeNone(t *testing.T) {
	ctx := context.Background()
	r := fstest.NewRun(t)
	const filePath = "file.txt"
	r.WriteFile(filePath, "content", time.Now())
	f := r.Flocal.(*Fs)

	// Get the object
	o, err := f.NewObject(ctx, filePath)
	require.NoError(t, err)

	// Test the hash is as we expect
	h, err := o.Hash(ctx, hash.None)
	require.Empty(t, h)
	require.NoError(t, err)
}

// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
	ctx := context.Background()
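The junction-point handling above relies on Go reporting NTFS junctions (and other reparse points that are not true symlinks) as os.ModeIrregular rather than os.ModeSymlink (golang/go#73827). A small sketch of the same mode check outside rclone, under that assumption:

```go
package modecheck

import (
	"io/fs"
	"os"
	"runtime"
)

// isLinkLike reports whether a file should be treated as a symlink for the
// purpose of an option like --skip-links. On Windows it also matches
// os.ModeIrregular, which is how Go reports junction points.
func isLinkLike(fi fs.FileInfo) bool {
	flag := os.ModeSymlink
	if runtime.GOOS == "windows" {
		flag |= os.ModeIrregular
	}
	return fi.Mode()&flag != 0
}
```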
@@ -17,9 +17,11 @@ Improvements:

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"slices"
	"strings"
@@ -216,7 +218,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	defer megaCacheMu.Unlock()
	srv := megaCache[opt.User]
	if srv == nil {
		srv = mega.New().SetClient(fshttp.NewClient(ctx))
		// srv = mega.New().SetClient(fshttp.NewClient(ctx))

		// Workaround for Mega's use of insecure cipher suites which are no longer supported by default since Go 1.22.
		// Relevant issues:
		// https://github.com/rclone/rclone/issues/8565
		// https://github.com/meganz/webclient/issues/103
		clt := fshttp.NewClient(ctx)
		clt.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
			var ids []uint16
			// Read default ciphers
			for _, cs := range tls.CipherSuites() {
				ids = append(ids, cs.ID)
			}
			// Insecure but Mega uses TLS_RSA_WITH_AES_128_GCM_SHA256 for storage endpoints
			// (e.g. https://gfs302n114.userstorage.mega.co.nz) as of June 18, 2025.
			t.TLSClientConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
		})
		srv = mega.New().SetClient(clt)

		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
		srv.SetHTTPS(opt.UseHTTPS)
		srv.SetLogger(func(format string, v ...any) {
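Since Go 1.22 the RSA key-exchange cipher suites are off by default, so a client has to opt back in explicitly for servers that still require them, which is what the workaround above does through rclone's fshttp helpers. A stdlib-only sketch of the same idea (illustrative; re-enabling TLS_RSA_WITH_AES_128_GCM_SHA256 weakens the TLS configuration and is only worth doing where the server leaves no alternative):

```go
package legacytls

import (
	"crypto/tls"
	"net/http"
)

// newLegacyCipherClient returns an *http.Client whose TLS configuration
// allows the Go defaults plus the legacy TLS_RSA_WITH_AES_128_GCM_SHA256
// suite (this list only applies to TLS 1.2 and below).
func newLegacyCipherClient() *http.Client {
	ids := make([]uint16, 0, len(tls.CipherSuites())+1)
	for _, cs := range tls.CipherSuites() {
		ids = append(ids, cs.ID) // the secure defaults
	}
	ids = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256) // legacy suite some servers still need

	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{CipherSuites: ids},
		},
	}
}
```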
@@ -749,6 +749,8 @@ func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo,

// Fetch metadata and update updateInfo if --metadata is in use
// modtime will still be set when there is no metadata to set
//
// May return info=nil and err=nil if there was no metadata to update.
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
@@ -768,6 +770,8 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
}

// updateMetadata calls Get, Set, and Write
//
// May return info=nil and err=nil if there was no metadata to update.
func (o *Object) updateMetadata(ctx context.Context, meta fs.Metadata) (info *api.Item, err error) {
	_, err = o.meta.Get(ctx) // refresh permissions
	if err != nil {

@@ -1782,7 +1782,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
	if err != nil {
		return nil, err
	}
	err = dstObj.setMetaData(info)
	if info != nil {
		err = dstObj.setMetaData(info)
	}
	return dstObj, err
}

@@ -1862,7 +1864,9 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	if err != nil {
		return nil, err
	}
	err = dstObj.setMetaData(info)
	if info != nil {
		err = dstObj.setMetaData(info)
	}
	return dstObj, err
}

@@ -2629,7 +2633,10 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.Obje
	if err != nil {
		return nil, fmt.Errorf("failed to fetch and update metadata: %w", err)
	}
	return info, o.setMetaData(info)
	if info != nil {
		err = o.setMetaData(info)
	}
	return info, err
}

// Update the object with the contents of the io.Reader, modTime and size
@@ -378,12 +378,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	return f, nil
}

// OpenWriterAt opens with a handle for random access writes
// XOpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// It truncates any existing object.
//
// OpenWriterAt disabled because it seems to have been disabled at pcloud
// PUT /file_open?flags=XXX&folderid=XXX&name=XXX HTTP/1.1
//
// {
//	"result": 2003,
//	"error": "Access denied. You do not have permissions to perform this operation."
// }
func (f *Fs) XOpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	client, err := f.newSingleConnClient(ctx)
	if err != nil {
		return nil, fmt.Errorf("create client: %w", err)
@@ -155,6 +155,7 @@ func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error)
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
		if err == nil && !info.Links.ApplicationOctetStream.Valid() {
			time.Sleep(5 * time.Second)
			return true, errors.New("no link")
		}
		return f.shouldRetry(ctx, resp, err)

@@ -467,6 +467,11 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
			// when a zero-byte file was uploaded with an invalid captcha token
			f.rst.captcha.Invalidate()
			return true, err
		} else if strings.Contains(apiErr.Reason, "idx.shub.mypikpak.com") && apiErr.Code == 500 {
			// internal server error: Post "http://idx.shub.mypikpak.com": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
			// This typically happens when trying to retrieve a gcid for which no record exists.
			// No retry is needed in this case.
			return false, err
		}
	}
@@ -101,6 +101,12 @@ var providerOption = fs.Option{
|
||||
}, {
|
||||
Value: "Dreamhost",
|
||||
Help: "Dreamhost DreamObjects",
|
||||
}, {
|
||||
Value: "Exaba",
|
||||
Help: "Exaba Object Storage",
|
||||
}, {
|
||||
Value: "FlashBlade",
|
||||
Help: "Pure Storage FlashBlade Object Storage",
|
||||
}, {
|
||||
Value: "GCS",
|
||||
Help: "Google Cloud Storage",
|
||||
@@ -131,6 +137,9 @@ var providerOption = fs.Option{
|
||||
}, {
|
||||
Value: "Magalu",
|
||||
Help: "Magalu Object Storage",
|
||||
}, {
|
||||
Value: "Mega",
|
||||
Help: "MEGA S4 Object Storage",
|
||||
}, {
|
||||
Value: "Minio",
|
||||
Help: "Minio Object Storage",
|
||||
@@ -567,7 +576,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -1003,6 +1012,12 @@ func init() {
|
||||
Value: "us-iad-1.linodeobjects.com",
|
||||
Help: "Washington, DC, (USA), us-iad-1",
|
||||
}},
|
||||
}, {
|
||||
// Lyve Cloud endpoints
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Lyve Cloud S3 API.\nRequired when using an S3 clone. Please type in your LyveCloud endpoint.\nExamples:\n- s3.us-west-1.{account_name}.lyve.seagate.com (US West 1 - California)\n- s3.eu-west-1.{account_name}.lyve.seagate.com (EU West 1 - Ireland)",
|
||||
Provider: "LyveCloud",
|
||||
Required: true,
|
||||
}, {
|
||||
// Magalu endpoints: https://docs.magalu.cloud/docs/object-storage/how-to/copy-url
|
||||
Name: "endpoint",
|
||||
@@ -1377,7 +1392,7 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -1426,18 +1441,6 @@ func init() {
|
||||
Value: "localhost:8333",
|
||||
Help: "SeaweedFS S3 localhost",
|
||||
Provider: "SeaweedFS",
|
||||
}, {
|
||||
Value: "s3.us-east-1.lyvecloud.seagate.com",
|
||||
Help: "Seagate Lyve Cloud US East 1 (Virginia)",
|
||||
Provider: "LyveCloud",
|
||||
}, {
|
||||
Value: "s3.us-west-1.lyvecloud.seagate.com",
|
||||
Help: "Seagate Lyve Cloud US West 1 (California)",
|
||||
Provider: "LyveCloud",
|
||||
}, {
|
||||
Value: "s3.ap-southeast-1.lyvecloud.seagate.com",
|
||||
Help: "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
|
||||
Provider: "LyveCloud",
|
||||
}, {
|
||||
Value: "oos.eu-west-2.outscale.com",
|
||||
Help: "Outscale EU West 2 (Paris)",
|
||||
@@ -1526,6 +1529,22 @@ func init() {
|
||||
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
|
||||
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
|
||||
Provider: "ArvanCloud",
|
||||
}, {
|
||||
Value: "s3.eu-central-1.s4.mega.io",
|
||||
Help: "Mega S4 eu-central-1 (Amsterdam)",
|
||||
Provider: "Mega",
|
||||
}, {
|
||||
Value: "s3.eu-central-2.s4.mega.io",
|
||||
Help: "Mega S4 eu-central-2 (Bettembourg)",
|
||||
Provider: "Mega",
|
||||
}, {
|
||||
Value: "s3.ca-central-1.s4.mega.io",
|
||||
Help: "Mega S4 ca-central-1 (Montreal)",
|
||||
Provider: "Mega",
|
||||
}, {
|
||||
Value: "s3.ca-west-1.s4.mega.io",
|
||||
Help: "Mega S4 ca-west-1 (Vancouver)",
|
||||
Provider: "Mega",
|
||||
}},
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
@@ -1908,7 +1927,7 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -1923,7 +1942,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
If the acl is an empty string then no X-Amz-Acl: header is added and
|
||||
the default (private) will be used.
|
||||
`,
|
||||
Provider: "!Storj,Selectel,Synology,Cloudflare",
|
||||
Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade,Mega",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "default",
|
||||
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
|
||||
@@ -1981,6 +2000,7 @@ isn't set then "acl" is used instead.
|
||||
If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
|
||||
header is added and the default (private) will be used.
|
||||
`,
|
||||
Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "private",
|
||||
@@ -3116,6 +3136,9 @@ func parsePath(path string) (root string) {
|
||||
// relative to f.root
|
||||
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
|
||||
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
|
||||
if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
|
||||
bucketPath = bucketPath[:len(bucketPath)-1]
|
||||
}
|
||||
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
|
||||
}
|
||||
|
||||
@@ -3495,6 +3518,9 @@ func setQuirks(opt *Options) {
|
||||
case "Dreamhost":
|
||||
urlEncodeListings = false
|
||||
useAlreadyExists = false // untested
|
||||
case "FlashBlade":
|
||||
mightGzip = false // Never auto gzips objects
|
||||
virtualHostStyle = false // supports vhost but defaults to paths
|
||||
case "IBMCOS":
|
||||
listObjectsV2 = false // untested
|
||||
virtualHostStyle = false
|
||||
@@ -3527,6 +3553,14 @@ func setQuirks(opt *Options) {
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false
|
||||
useAlreadyExists = false
|
||||
case "Mega":
|
||||
listObjectsV2 = true
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = true
|
||||
useMultipartEtag = false
|
||||
useAlreadyExists = false
|
||||
// Multipart server side copies not supported
|
||||
opt.CopyCutoff = math.MaxInt64
|
||||
case "Minio":
|
||||
virtualHostStyle = false
|
||||
case "Netease":
|
||||
@@ -3597,6 +3631,8 @@ func setQuirks(opt *Options) {
|
||||
urlEncodeListings = false
|
||||
virtualHostStyle = false
|
||||
useAlreadyExists = false // untested
|
||||
case "Exaba":
|
||||
virtualHostStyle = false
|
||||
case "GCS":
|
||||
// Google break request Signature by mutating accept-encoding HTTP header
|
||||
// https://github.com/rclone/rclone/issues/6670
|
||||
@@ -4425,7 +4461,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
|
||||
remote = remote[len(opt.prefix):]
|
||||
if isDirectory {
|
||||
// process directory markers as directories
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
remote, _ = strings.CutSuffix(remote, "/")
|
||||
}
|
||||
if opt.addBucket {
|
||||
remote = bucket.Join(opt.bucket, remote)
|
||||
@@ -4740,7 +4776,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
|
||||
// mkdirParent creates the parent bucket/directory if it doesn't exist
|
||||
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
|
||||
remote = strings.TrimRight(remote, "/")
|
||||
remote, _ = strings.CutSuffix(remote, "/")
|
||||
dir := path.Dir(remote)
|
||||
if dir == "/" || dir == "." {
|
||||
dir = ""
|
||||
|
||||
@@ -1550,7 +1550,7 @@ func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string
	extraHeaders := map[string]string{}
	if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
		if o.fs.useOCMtime {
			extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", o.modTime.Unix())
			extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
		}
		// Set one upload checksum
		// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
@@ -77,6 +77,10 @@ def check_file(file):
    viol = False
    new_lines = get_file_content("HEAD", file)
    old_lines = get_file_content("HEAD~1", file)

    # If old file did not exist or was empty then don't check
    if not old_lines:
        return

    # Entire autogenerated file check.
    if any("autogenerated - DO NOT EDIT" in l for l in new_lines[:10]):
@@ -41,8 +41,10 @@ docs = [
    "crypt.md",
    "compress.md",
    "combine.md",
    "doi.md",
    "dropbox.md",
    "filefabric.md",
    "filelu.md",
    "filescom.md",
    "ftp.md",
    "gofile.md",
@@ -34,17 +34,15 @@ var commandDefinition = &cobra.Command{
	Long: strings.ReplaceAll(`
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.

`+transform.SprintList()+`

Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
`+transform.Help()+`Multiple transformations can be used in sequence, applied in the order they are specified on the command line.

The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.

## Files vs Directories ##
## Files vs Directories

By default ¡--name-transform¡ will only apply to file names. This means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just directories.
To choose which part of the file path is affected some tags can be added to the ¡--name-transform¡
To choose which part of the file path is affected some tags can be added to the ¡--name-transform¡.

| Tag | Effect |
|------|------|
@@ -54,11 +52,11 @@ To choose which which part of the file path is affected some tags can be added t

This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.

For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡
For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡.

Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.

## Ordering and Conflicts ##
## Ordering and Conflicts

* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
@@ -73,19 +71,19 @@ user, allowing for intentional use cases (e.g., trimming one prefix before addin
* Users should be aware that certain combinations may lead to unexpected results and should verify
transformations using ¡--dry-run¡ before execution.

## Race Conditions and Non-Deterministic Behavior ##
## Race Conditions and Non-Deterministic Behavior

Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.

* To minimize risks, users should:
  * Carefully review transformations that may introduce conflicts.
  * Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
  * Avoid transformations that cause multiple distinct source files to map to the same destination name.
  * Consider disabling concurrency with ¡--transfers=1¡ if necessary.
  * Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.
To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.

`, "¡", "`"),
	Annotations: map[string]string{
@@ -141,7 +141,7 @@ func TestTransform(t *testing.T) {
}

// const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሀሠበዠጠᎠᏀᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠀⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
const alphabet = "abcdefg123456789"
const alphabet = "abcdefg123456789Ü"

var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"}

@@ -251,3 +251,25 @@ func detectEncoding(s string) string {
	}
	return "OTHER"
}

func TestUnicodeEquivalence(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	ctx := context.Background()
	r.Mkdir(ctx, r.Fremote)
	const remote = "Über"
	item := r.WriteObject(ctx, remote, "", t1)

	obj, err := r.Fremote.NewObject(ctx, remote) // can't use r.CheckRemoteListing here as it forces NFC
	require.NoError(t, err)
	require.NotEmpty(t, obj)

	err = transform.SetOptions(ctx, "all,nfc")
	require.NoError(t, err)

	err = sync.Transform(ctx, r.Fremote, true, true)
	assert.NoError(t, err)
	item.Path = norm.NFC.String(item.Path)
	r.CheckRemoteListing(t, []fstest.Item{item}, nil)
}
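TestUnicodeEquivalence exercises the fix for moving a file to a Unicode-equivalent name: "Über" can be stored either precomposed (NFC) or decomposed (NFD), two byte sequences that render identically but compare unequal. A small sketch of the distinction using golang.org/x/text/unicode/norm, the same package the test imports:

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	nfc := "\u00dcber" // "Über" as a single precomposed rune
	nfd := "U\u0308ber" // "Über" as 'U' plus a combining diaeresis

	fmt.Println(nfc == nfd)                  // false: different byte sequences
	fmt.Println(norm.NFC.String(nfd) == nfc) // true: equal after NFC normalisation
	fmt.Println(norm.NFD.String(nfc) == nfd) // true: equal after NFD normalisation
}
```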
@@ -45,6 +45,10 @@ func getFileHashByte(node any, hashType hash.Type) []byte {
}

func getFileHash(node any, hashType hash.Type) string {
	if hashType == hash.None {
		return ""
	}

	var o fs.Object

	switch b := node.(type) {
@@ -123,7 +123,9 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
{{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}}
{{< provider name="FileLu Cloud Storage" home="https://filelu.com/" config="/filelu/" >}}
{{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}}
{{< provider name="FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
{{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
{{< provider name="Gofile" home="https://gofile.io/" config="/gofile/" >}}
{{< provider name="Google Cloud Storage" home="https://cloud.google.com/storage/" config="/googlecloudstorage/" >}}
@@ -148,7 +150,8 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Magalu" home="https://magalu.cloud/object-storage/" config="/s3/#magalu" >}}
{{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}}
{{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}
{{< provider name="Mega" home="https://mega.nz/" config="/mega/" >}}
{{< provider name="MEGA" home="https://mega.nz/" config="/mega/" >}}
{{< provider name="MEGA S4" home="https://mega.io/objectstorage" config="/s3/#mega" >}}
{{< provider name="Memory" home="/memory/" config="/memory/" >}}
{{< provider name="Microsoft Azure Blob Storage" home="https://azure.microsoft.com/en-us/services/storage/blobs/" config="/azureblob/" >}}
{{< provider name="Microsoft Azure Files Storage" home="https://azure.microsoft.com/en-us/services/storage/files/" config="/azurefiles/" >}}
@@ -973,3 +973,12 @@ put them back in again.` >}}
* Jeff Geerling <geerlingguy@mac.com>
* Germán Casares <german.casares.march+github@gmail.com>
* fhuber <florian.huber@noris.de>
* wbulot <wbulot@hotmail.com>
* Jeremy Daer <jeremydaer@gmail.com>
* Oleksiy Stashok <ostashok@tesla.com>
* PrathameshLakawade <prathameshlakawade@gmail.com>
* Nathanael Demacon <7271496+quantumsheep@users.noreply.github.com>
* ahxxm <ahxxm@users.noreply.github.com>
* Flora Thiebaut <johann.thiebaut@gmail.com>
* kingston125 <support@filelu.com>
* Ser-Bul <30335009+Ser-Bul@users.noreply.github.com>
@@ -719,6 +719,65 @@ Properties:
|
||||
- Type: int
|
||||
- Default: 16
|
||||
|
||||
#### --azureblob-copy-cutoff
|
||||
|
||||
Cutoff for switching to multipart copy.
|
||||
|
||||
Any files larger than this that need to be server-side copied will be
|
||||
copied in chunks of chunk_size using the put block list API.
|
||||
|
||||
Files smaller than this limit will be copied with the Copy Blob API.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: copy_cutoff
|
||||
- Env Var: RCLONE_AZUREBLOB_COPY_CUTOFF
|
||||
- Type: SizeSuffix
|
||||
- Default: 8Mi
|
||||
|
||||
#### --azureblob-copy-concurrency
|
||||
|
||||
Concurrency for multipart copy.
|
||||
|
||||
This is the number of chunks of the same file that are copied
|
||||
concurrently.
|
||||
|
||||
These chunks are not buffered in memory and Microsoft recommends
|
||||
setting this value to greater than 1000 in the azcopy documentation.
|
||||
|
||||
https://learn.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-optimize#increase-concurrency
|
||||
|
||||
In tests, copy speed increases almost linearly with copy
|
||||
concurrency.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: copy_concurrency
|
||||
- Env Var: RCLONE_AZUREBLOB_COPY_CONCURRENCY
|
||||
- Type: int
|
||||
- Default: 512
|
||||
|
||||
#### --azureblob-use-copy-blob
|
||||
|
||||
Whether to use the Copy Blob API when copying to the same storage account.
|
||||
|
||||
If true (the default) then rclone will use the Copy Blob API for
|
||||
copies to the same storage account even when the size is above the
|
||||
copy_cutoff.
|
||||
|
||||
Rclone assumes that the same storage account means the same config
|
||||
and does not check for the same storage account in different configs.
|
||||
|
||||
There should be no need to change this value.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: use_copy_blob
|
||||
- Env Var: RCLONE_AZUREBLOB_USE_COPY_BLOB
|
||||
- Type: bool
|
||||
- Default: true
|
||||
|
||||
#### --azureblob-list-chunk
|
||||
|
||||
Size of blob list.
|
||||
|
||||
@@ -615,6 +615,42 @@ Properties:
|
||||
- Type: string
|
||||
- Required: false
|
||||
|
||||
#### --azurefiles-disable-instance-discovery
|
||||
|
||||
Skip requesting Microsoft Entra instance metadata
|
||||
This should be set true only by applications authenticating in
|
||||
disconnected clouds, or private clouds such as Azure Stack.
|
||||
It determines whether rclone requests Microsoft Entra instance
|
||||
metadata from `https://login.microsoft.com/` before
|
||||
authenticating.
|
||||
Setting this to true will skip this request, making you responsible
|
||||
for ensuring the configured authority is valid and trustworthy.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: disable_instance_discovery
|
||||
- Env Var: RCLONE_AZUREFILES_DISABLE_INSTANCE_DISCOVERY
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --azurefiles-use-az
|
||||
|
||||
Use Azure CLI tool az for authentication
|
||||
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
Setting this can be useful if you wish to use the az CLI on a host with
|
||||
a System Managed Identity that you do not want to use.
|
||||
Don't set env_auth at the same time.
|
||||
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: use_az
|
||||
- Env Var: RCLONE_AZUREFILES_USE_AZ
|
||||
- Type: bool
|
||||
- Default: false
|
||||
|
||||
#### --azurefiles-endpoint
|
||||
|
||||
Endpoint for the service.
|
||||
|
||||
@@ -390,6 +390,8 @@ Use client credentials OAuth flow.

This will use the OAUTH2 client Credentials Flow as described in RFC 6749.

Note that this option is NOT supported by all backends.

Properties:

- Config: client_credentials
@@ -5,6 +5,170 @@ description: "Rclone Changelog"

# Changelog

## v1.70.3 - 2025-07-09

[See commits](https://github.com/rclone/rclone/compare/v1.70.2...v1.70.3)

* Bug Fixes
    * check: Fix difference report (was reporting error counts) (albertony)
    * march: Fix deadlock when using `--no-traverse` (Nick Craig-Wood)
    * doc fixes (albertony, Nick Craig-Wood)
* Azure Blob
    * Fix server side copy error "requires exactly one scope" (Nick Craig-Wood)
* B2
    * Fix finding objects when using `--b2-version-at` (Davide Bizzarri)
* Linkbox
    * Fix upload error "user upload file not exist" (Nick Craig-Wood)
* Pikpak
    * Improve error handling for missing links and unrecoverable 500s (wiserain)
* WebDAV
    * Fix setting modtime to that of local object instead of remote (WeidiDeng)

## v1.70.2 - 2025-06-27

[See commits](https://github.com/rclone/rclone/compare/v1.70.1...v1.70.2)

* Bug Fixes
    * convmv: Make --dry-run logs less noisy (nielash)
    * sync: Avoid copying dir metadata to itself (nielash)
    * build: Bump github.com/go-chi/chi/v5 from 5.2.1 to 5.2.2 to fix GHSA-vrw8-fxc6-2r93 (dependabot[bot])
    * convmv: Fix moving to unicode-equivalent name (nielash)
    * log: Fix deadlock when using systemd logging (Nick Craig-Wood)
    * pacer: Fix nil pointer deref in RetryError (Nick Craig-Wood)
    * doc fixes (Ali Zein Yousuf, Nick Craig-Wood)
* Local
    * Fix --skip-links on Windows when skipping Junction points (Nick Craig-Wood)
* Combine
    * Fix directory not found errors with ListP interface (Nick Craig-Wood)
* Mega
    * Fix tls handshake failure (necaran)
* Pikpak
    * Fix uploads fail with "aws-chunked encoding is not supported" error (Nick Craig-Wood)

## v1.70.1 - 2025-06-19

[See commits](https://github.com/rclone/rclone/compare/v1.70.0...v1.70.1)

* Bug Fixes
    * convmv: Fix spurious "error running command echo" on Windows (Nick Craig-Wood)
    * doc fixes (albertony, Ed Craig-Wood, jinjingroad)

## v1.70.0 - 2025-06-17
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.70.0)
|
||||
|
||||
* New backends
|
||||
* [DOI](/doi/) (Flora Thiebaut)
|
||||
* [FileLu](/filelu/) (kingston125)
|
||||
* New S3 providers:
|
||||
* [MEGA S4](/s3/#mega) (Nick Craig-Wood)
|
||||
* [Pure Storage FlashBlade](/s3/#pure-storage-flashblade) (Jeremy Daer)
|
||||
* New commands
|
||||
* [convmv](/commands/rclone_convmv/): for moving and transforming files (nielash)
|
||||
* New Features
|
||||
* Add [`--max-connections`](/docs/#max-connections-n) to control maximum backend concurrency (Nick Craig-Wood)
|
||||
* Add [`--max-buffer-memory`](/docs/#max-buffer-memory) to limit total buffer memory usage (Nick Craig-Wood)
|
||||
* Add transform library and [`--name-transform`](/docs/#name-transform-command-xxxx) flag (nielash)
|
||||
* sync: Implement [`--list-cutoff`](/docs/#list-cutoff) to allow on disk sorting for reduced memory use (Nick Craig-Wood)
|
||||
* accounting: Add listed stat for number of directory entries listed (Nick Craig-Wood)
|
||||
* backend: Skip hash calculation when the hashType is None (Oleksiy Stashok)
|
||||
* build
|
||||
* Update to go1.24 and make go1.22 the minimum required version (Nick Craig-Wood)
|
||||
* Disable docker builds on PRs & add missing dockerfile changes (Anagh Kumar Baranwal)
|
||||
* Modernize Go usage (Nick Craig-Wood)
|
||||
* Update all dependencies (Nick Craig-Wood)
|
||||
* cmd/authorize: Show required arguments in help text (simwai)
|
||||
* cmd/config: add `--no-output` option (Jess)
|
||||
* cmd/gitannex
|
||||
* Tweak parsing of "rcloneremotename" config (Dan McArdle)
|
||||
* Permit remotes with options (Dan McArdle)
|
||||
* Reject unknown layout modes in INITREMOTE (Dan McArdle)
|
||||
* docker image: Add label org.opencontainers.image.source for release notes in Renovate dependency updates (Robin Schneider)
|
||||
* doc fixes (albertony, Andrew Kreimer, Ben Boeckel, Christoph Berger, Danny Garside, Dimitri Papadopoulos, eccoisle, Ed Craig-Wood, Fernando Fernández, jack, Jeff Geerling, Jugal Kishore, kingston125, luzpaz, Markus Gerstel, Matt Ickstadt, Michael Kebe, Nick Craig-Wood, PrathameshLakawade, Ser-Bul, simonmcnair, Tim White, Zachary Vorhies)
|
||||
* filter:
|
||||
* Add `--hash-filter` to deterministically select a subset of files (Nick Craig-Wood)
|
||||
* Show `--min-size` and `--max-size` in `--dump` filters (Nick Craig-Wood)
|
||||
* hash: Add SHA512 support for file hashes (Enduriel)
|
||||
* http servers: Add `--user-from-header` to use for authentication (Moises Lima)
|
||||
* lib/batcher: Deprecate unused option: batch_commit_timeout (Dan McArdle)
|
||||
* log:
|
||||
* Remove github.com/sirupsen/logrus and replace with log/slog (Nick Craig-Wood)
|
||||
* Add `--windows-event-log-level` to support Windows Event Log (Nick Craig-Wood)
|
||||
* rc
|
||||
* Add add `short` parameter to `core/stats` to not return transferring and checking (Nick Craig-Wood)
|
||||
* In `options/info` make FieldName contain a "." if it should be nested (Nick Craig-Wood)
|
||||
* Add rc control for serve commands (Nick Craig-Wood)
|
||||
* rcserver: Improve content-type check (Jonathan Giannuzzi)
|
||||
* serve nfs
|
||||
* Update docs to note Windows is not supported (Zachary Vorhies)
|
||||
* Change the format of `--nfs-cache-type symlink` file handles (Nick Craig-Wood)
|
||||
* Make metadata files have special file handles (Nick Craig-Wood)
|
||||
* touch: Make touch obey `--transfers` (Nick Craig-Wood)
|
||||
* version: Add `--deps` flag to show dependencies and other build info (Nick Craig-Wood)
|
||||
* Bug Fixes
|
||||
* serve s3:
|
||||
* Fix ListObjectsV2 response (fhuber)
|
||||
* Remove redundant handler initialization (Tho Neyugn)
|
||||
* stats: Fix goroutine leak and improve stats accounting process (Nathanael Demacon)
|
||||
* VFS
|
||||
* Add `--vfs-metadata-extension` to expose metadata sidecar files (Nick Craig-Wood)
|
||||
* Azure Blob
|
||||
* Add support for `x-ms-tags` header (Trevor Starick)
|
||||
* Cleanup uncommitted blocks on upload errors (Nick Craig-Wood)
|
||||
* Speed up server side copies for small files (Nick Craig-Wood)
|
||||
* Implement multipart server side copy (Nick Craig-Wood)
|
||||
* Remove uncommitted blocks on InvalidBlobOrBlock error (Nick Craig-Wood)
|
||||
* Fix handling of objects with // in (Nick Craig-Wood)
|
||||
* Handle retry error codes more carefully (Nick Craig-Wood)
|
||||
* Fix errors not being retried when doing single part copy (Nick Craig-Wood)
|
||||
* Fix multipart server side copies of 0 sized files (Nick Craig-Wood)
|
||||
* Azurefiles
|
||||
* Add `--azurefiles-use-az` and `--azurefiles-disable-instance-discovery` (b-wimmer)
|
||||
* B2
|
||||
* Add SkipDestructive handling to backend commands (Pat Patterson)
|
||||
* Use file id from listing when not presented in headers (ahxxm)
|
||||
* Cloudinary
|
||||
* Automatically add/remove known media files extensions (yuval-cloudinary)
|
||||
* Var naming convention (yuval-cloudinary)
|
||||
* Drive
|
||||
* Added `backend moveid` command (Spencer McCullough)
|
||||
* Dropbox
|
||||
* Support Dropbox Paper (Dave Vasilevsky)
|
||||
* FTP
|
||||
* Add `--ftp-http-proxy` to connect via HTTP CONNECT proxy
|
||||
* Gofile
|
||||
* Update to use new direct upload endpoint (wbulot)
|
||||
* Googlephotos
|
||||
* Update read only and read write scopes to meet Google's requirements. (Germán Casares)
|
||||
* Iclouddrive
|
||||
* Fix panic and files potentially downloaded twice (Clément Wehrung)
|
||||
* Internetarchive
|
||||
* Add `--internetarchive-metadata="key=value"` for setting item metadata (Corentin Barreau)
|
||||
* Onedrive
|
||||
* Fix "The upload session was not found" errors (Nick Craig-Wood)
|
||||
* Re-add `--onedrive-upload-cutoff` flag (Nick Craig-Wood)
|
||||
* Fix crash if no metadata was updated (Nick Craig-Wood)
|
||||
* Opendrive
|
||||
* Added `--opendrive-access` flag to handle permissions (Joel K Biju)
|
||||
* Pcloud
|
||||
* Fix "Access denied. You do not have permissions to perform this operation" on large uploads (Nick Craig-Wood)
|
||||
* S3
|
||||
* Fix handling of objects with // in (Nick Craig-Wood)
|
||||
* Add IBM IAM signer (Alexander Minbaev)
|
||||
* Split the GCS quirks into `--s3-use-x-id` and `--s3-sign-accept-encoding` (Nick Craig-Wood)
|
||||
* Implement paged listing interface ListP (Nick Craig-Wood)
|
||||
* Add Pure Storage FlashBlade provider support (Jeremy Daer)
|
||||
* Require custom endpoint for Lyve Cloud v2 support (PrathameshLakawade)
|
||||
* MEGA S4 support (Nick Craig-Wood)
|
||||
* SFTP
|
||||
* Add `--sftp-http-proxy` to connect via HTTP CONNECT proxy (Nick Craig-Wood)
|
||||
* Smb
|
||||
* Add support for kerberos authentication (Jonathan Giannuzzi)
|
||||
* Improve connection pooling efficiency (Jonathan Giannuzzi)
|
||||
* WebDAV
|
||||
* Retry propfind on 425 status (Jörn Friedrich Dreyer)
|
||||
* Add an ownCloud Infinite Scale vendor that enables tus chunked upload support (Klaas Freitag)
|
||||
|
||||
## v1.69.3 - 2025-05-21
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.69.2...v1.69.3)
|
||||
|
||||
@@ -37,6 +37,8 @@ rclone [flags]
|
||||
--azureblob-client-id string The ID of the client in use
|
||||
--azureblob-client-secret string One of the service principal's client secrets
|
||||
--azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
|
||||
--azureblob-copy-concurrency int Concurrency for multipart copy (default 512)
|
||||
--azureblob-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 8Mi)
|
||||
--azureblob-delete-snapshots string Set to specify how to deal with snapshots on blob deletion
|
||||
--azureblob-description string Description of the remote
|
||||
--azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
|
||||
@@ -60,6 +62,7 @@ rclone [flags]
|
||||
--azureblob-upload-concurrency int Concurrency for multipart uploads (default 16)
|
||||
--azureblob-upload-cutoff string Cutoff for switching to chunked upload (<= 256 MiB) (deprecated)
|
||||
--azureblob-use-az Use Azure CLI tool az for authentication
|
||||
--azureblob-use-copy-blob Whether to use the Copy Blob API when copying to the same storage account (default true)
|
||||
--azureblob-use-emulator Uses local storage emulator if provided as 'true'
|
||||
--azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
|
||||
--azureblob-username string User name (usually an email address)
|
||||
@@ -72,6 +75,7 @@ rclone [flags]
|
||||
--azurefiles-client-send-certificate-chain Send the certificate chain when using certificate auth
|
||||
--azurefiles-connection-string string Azure Files Connection String
|
||||
--azurefiles-description string Description of the remote
|
||||
--azurefiles-disable-instance-discovery Skip requesting Microsoft Entra instance metadata
|
||||
--azurefiles-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8,Dot)
|
||||
--azurefiles-endpoint string Endpoint for the service
|
||||
--azurefiles-env-auth Read credentials from runtime (environment variables, CLI or MSI)
|
||||
@@ -86,6 +90,7 @@ rclone [flags]
|
||||
--azurefiles-share-name string Azure Files Share Name
|
||||
--azurefiles-tenant string ID of the service principal's tenant. Also called its directory ID
|
||||
--azurefiles-upload-concurrency int Concurrency for multipart uploads (default 16)
|
||||
--azurefiles-use-az Use Azure CLI tool az for authentication
|
||||
--azurefiles-use-msi Use a managed service identity to authenticate (only works in Azure)
|
||||
--azurefiles-username string User name (usually an email address)
|
||||
--b2-account string Account ID or Application Key ID
|
||||
@@ -160,12 +165,14 @@ rclone [flags]
|
||||
--chunker-remote string Remote to chunk/unchunk
|
||||
--client-cert string Client SSL certificate (PEM) for mutual TLS auth
|
||||
--client-key string Client SSL private key (PEM) for mutual TLS auth
|
||||
--cloudinary-adjust-media-files-extensions Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems (default true)
|
||||
--cloudinary-api-key string Cloudinary API Key
|
||||
--cloudinary-api-secret string Cloudinary API Secret
|
||||
--cloudinary-cloud-name string Cloudinary Environment Name
|
||||
--cloudinary-description string Description of the remote
|
||||
--cloudinary-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
|
||||
--cloudinary-eventually-consistent-delay Duration Wait N seconds for eventual consistency of the databases that support the backend operation (default 0s)
|
||||
--cloudinary-media-extensions stringArray Cloudinary supported media extensions (default 3ds,3g2,3gp,ai,arw,avi,avif,bmp,bw,cr2,cr3,djvu,dng,eps3,fbx,flif,flv,gif,glb,gltf,hdp,heic,heif,ico,indd,jp2,jpe,jpeg,jpg,jxl,jxr,m2ts,mov,mp4,mpeg,mts,mxf,obj,ogv,pdf,ply,png,psd,svg,tga,tif,tiff,ts,u3ma,usdz,wdp,webm,webp,wmv)
|
||||
--cloudinary-upload-prefix string Specify the API endpoint for environments out of the US
|
||||
--cloudinary-upload-preset string Upload Preset to select asset manipulation on upload
|
||||
--color AUTO|NEVER|ALWAYS When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default AUTO)
|
||||
@@ -204,6 +211,10 @@ rclone [flags]
|
||||
--disable string Disable a comma separated list of features (use --disable help to see a list)
|
||||
--disable-http-keep-alives Disable HTTP keep-alives and use each connection once
|
||||
--disable-http2 Disable HTTP/2 in the global transport
|
||||
--doi-description string Description of the remote
|
||||
--doi-doi string The DOI or the doi.org URL
|
||||
--doi-doi-resolver-api-url string The URL of the DOI resolver API to use
|
||||
--doi-provider string DOI provider
|
||||
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
|
||||
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs
|
||||
--drive-auth-owner-only Only consider files owned by the authenticated user
|
||||
@@ -255,7 +266,6 @@ rclone [flags]
|
||||
--drive-use-trash Send files to the trash instead of deleting permanently (default true)
|
||||
--drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off)
|
||||
--dropbox-auth-url string Auth server URL
|
||||
--dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
|
||||
--dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
|
||||
--dropbox-batch-size int Max number of files in upload batch
|
||||
--dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
|
||||
@@ -265,11 +275,14 @@ rclone [flags]
|
||||
--dropbox-client-secret string OAuth Client Secret
|
||||
--dropbox-description string Description of the remote
|
||||
--dropbox-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
|
||||
--dropbox-export-formats CommaSepList Comma separated list of preferred formats for exporting files (default html,md)
|
||||
--dropbox-impersonate string Impersonate this user when using a business account
|
||||
--dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
|
||||
--dropbox-root-namespace string Specify a different Dropbox namespace ID to use as the root for all paths
|
||||
--dropbox-shared-files Instructs rclone to work on individual shared files
|
||||
--dropbox-shared-folders Instructs rclone to work on shared folders
|
||||
--dropbox-show-all-exports Show all exportable files in listings
|
||||
--dropbox-skip-exports Skip exportable files in all listings
|
||||
--dropbox-token string OAuth Access Token as a JSON blob
|
||||
--dropbox-token-url string Token server url
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
@@ -298,6 +311,9 @@ rclone [flags]
|
||||
--filefabric-token-expiry string Token expiry time
|
||||
--filefabric-url string URL of the Enterprise File Fabric to connect to
|
||||
--filefabric-version string Version read from the file fabric
|
||||
--filelu-description string Description of the remote
|
||||
--filelu-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,Colon,Question,Asterisk,Pipe,Hash,Percent,BackSlash,CrLf,Del,Ctl,LeftSpace,LeftPeriod,LeftTilde,LeftCrLfHtVt,RightSpace,RightPeriod,RightCrLfHtVt,InvalidUtf8,Dot,SquareBracket,Semicolon,Exclamation)
|
||||
--filelu-key string Your FileLu Rclone key from My Account
|
||||
--files-from stringArray Read list of source-file names from file (use - to read from stdin)
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
--filescom-api-key string The API key used to authenticate with Files.com
|
||||
@@ -323,6 +339,7 @@ rclone [flags]
|
||||
--ftp-explicit-tls Use Explicit FTPS (FTP over TLS)
|
||||
--ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD
|
||||
--ftp-host string FTP host to connect to
|
||||
--ftp-http-proxy string URL for HTTP CONNECT proxy
|
||||
--ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
|
||||
--ftp-no-check-certificate Do not verify the TLS certificate of the server
|
||||
--ftp-no-check-upload Don't check the upload is OK
|
||||
@@ -364,7 +381,6 @@ rclone [flags]
|
||||
--gofile-list-chunk int Number of items to list in each call (default 1000)
|
||||
--gofile-root-folder-id string ID of the root folder
|
||||
--gphotos-auth-url string Auth server URL
|
||||
--gphotos-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
|
||||
--gphotos-batch-mode string Upload file batching sync|async|off (default "sync")
|
||||
--gphotos-batch-size int Max number of files in upload batch
|
||||
--gphotos-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
|
||||
@@ -380,6 +396,7 @@ rclone [flags]
|
||||
--gphotos-start-year int Year limits the photos to be downloaded to those which are uploaded after the given year (default 2000)
|
||||
--gphotos-token string OAuth Access Token as a JSON blob
|
||||
--gphotos-token-url string Token server url
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
|
||||
--hasher-description string Description of the remote
|
||||
--hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
|
||||
@@ -449,6 +466,8 @@ rclone [flags]
|
||||
--internetarchive-encoding Encoding The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
|
||||
--internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
|
||||
--internetarchive-front-endpoint string Host of InternetArchive Frontend (default "https://archive.org")
|
||||
--internetarchive-item-derive Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload (default true)
|
||||
--internetarchive-item-metadata stringArray Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set
|
||||
--internetarchive-secret-access-key string IAS3 Secret Key (password)
|
||||
--internetarchive-wait-archive Duration Timeout for waiting the server's processing tasks (specifically archive and book_op) to finish (default 0s)
|
||||
--jottacloud-auth-url string Auth server URL
|
||||
@@ -476,6 +495,7 @@ rclone [flags]
|
||||
--linkbox-description string Description of the remote
|
||||
--linkbox-token string Token from https://www.linkbox.to/admin/account
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--list-cutoff int To save memory, sort directory listings on disk above this threshold (default 1000000)
|
||||
--local-case-insensitive Force the filesystem to report itself as case insensitive
|
||||
--local-case-sensitive Force the filesystem to report itself as case sensitive
|
||||
--local-description string Description of the remote
|
||||
@@ -491,7 +511,7 @@ rclone [flags]
|
||||
--local-unicode-normalization Apply unicode NFC normalization to paths and filenames
|
||||
--local-zero-size-links Assume the Stat size of links is zero (and read them instead) (deprecated)
|
||||
--log-file string Log everything to this file
|
||||
--log-format string Comma separated list of log format options (default "date,time")
|
||||
--log-format Bits Comma separated list of log format options (default date,time)
|
||||
--log-level LogLevel Log level DEBUG|INFO|NOTICE|ERROR (default NOTICE)
|
||||
--log-systemd Activate systemd integration for the logger
|
||||
--low-level-retries int Number of low level retries to do (default 10)
|
||||
@@ -512,6 +532,8 @@ rclone [flags]
|
||||
--mailru-user string User name (usually email)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
|
||||
--max-buffer-memory SizeSuffix If set, don't allocate more than this amount of memory as buffers (default off)
|
||||
--max-connections int Maximum number of simultaneous backend API connections, 0 for unlimited
|
||||
--max-delete int When synchronizing, limit the number of deletes (default -1)
|
||||
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
|
||||
--max-depth int If set limits the recursion depth to this (default -1)
|
||||
@@ -553,6 +575,7 @@ rclone [flags]
|
||||
--metrics-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
|
||||
--metrics-template string User-specified template
|
||||
--metrics-user string User name for authentication
|
||||
--metrics-user-from-header string User name from a defined HTTP header
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
|
||||
--modify-window Duration Max time diff to be considered the same (default 1ns)
|
||||
@@ -560,6 +583,7 @@ rclone [flags]
|
||||
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 256Mi)
|
||||
--multi-thread-streams int Number of streams to use for multi-thread downloads (default 4)
|
||||
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
|
||||
--name-transform stringArray Transform paths during the copy process
|
||||
--netstorage-account string Set the NetStorage account name
|
||||
--netstorage-description string Description of the remote
|
||||
--netstorage-host string Domain+path of NetStorage host to connect to
|
||||
@@ -601,6 +625,7 @@ rclone [flags]
|
||||
--onedrive-tenant string ID of the service principal's tenant. Also called its directory ID
|
||||
--onedrive-token string OAuth Access Token as a JSON blob
|
||||
--onedrive-token-url string Token server url
|
||||
--onedrive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default off)
|
||||
--oos-attempt-resume-upload If true attempt to resume previously started multipart upload for the object
|
||||
--oos-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
|
||||
--oos-compartment string Specify compartment OCID, if you need to list buckets
|
||||
@@ -626,6 +651,7 @@ rclone [flags]
|
||||
--oos-storage-tier string The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm (default "Standard")
|
||||
--oos-upload-concurrency int Concurrency for multipart uploads (default 10)
|
||||
--oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
|
||||
--opendrive-access string Files and folders will be uploaded with this access permission (default private) (default "private")
|
||||
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
|
||||
--opendrive-description string Description of the remote
|
||||
--opendrive-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
|
||||
@@ -736,6 +762,7 @@ rclone [flags]
|
||||
--rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
|
||||
--rc-template string User-specified template
|
||||
--rc-user string User name for authentication
|
||||
--rc-user-from-header string User name from a defined HTTP header
|
||||
--rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
|
||||
--rc-web-gui Launch WebGUI on localhost
|
||||
--rc-web-gui-force-update Force update to latest version of web gui
|
||||
@@ -760,6 +787,8 @@ rclone [flags]
|
||||
--s3-endpoint string Endpoint for S3 API
|
||||
--s3-env-auth Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars)
|
||||
--s3-force-path-style If true use path style access if false use virtual hosted style (default true)
|
||||
--s3-ibm-api-key string IBM API Key to be used to obtain IAM token
|
||||
--s3-ibm-resource-instance-id string IBM service instance id
|
||||
--s3-leave-parts-on-error If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery
|
||||
--s3-list-chunk int Size of listing chunk (response list for each ListObject S3 request) (default 1000)
|
||||
--s3-list-url-encode Tristate Whether to url encode listings: true/false/unset (default unset)
|
||||
@@ -780,6 +809,7 @@ rclone [flags]
|
||||
--s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3
|
||||
--s3-session-token string An AWS session token
|
||||
--s3-shared-credentials-file string Path to the shared credentials file
|
||||
--s3-sign-accept-encoding Tristate Set if rclone should include Accept-Encoding as part of the signature (default unset)
|
||||
--s3-sse-customer-algorithm string If using SSE-C, the server-side encryption algorithm used when storing this object in S3
|
||||
--s3-sse-customer-key string To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data
|
||||
--s3-sse-customer-key-base64 string If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data
|
||||
@@ -796,6 +826,7 @@ rclone [flags]
|
||||
--s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset)
|
||||
--s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
|
||||
--s3-use-unsigned-payload Tristate Whether to use an unsigned payload in PutObject (default unset)
|
||||
--s3-use-x-id Tristate Set if rclone should add x-id URL parameters (default unset)
|
||||
--s3-v2-auth If true use v2 authentication
|
||||
--s3-version-at Time Show file versions as they were at the specified time (default off)
|
||||
--s3-version-deleted Show deleted file markers when using versions
|
||||
@@ -822,6 +853,7 @@ rclone [flags]
|
||||
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
|
||||
--sftp-host string SSH host to connect to
|
||||
--sftp-host-key-algorithms SpaceSepList Space separated list of host key algorithms, ordered by preference
|
||||
--sftp-http-proxy string URL for HTTP CONNECT proxy
|
||||
--sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
|
||||
--sftp-key-exchange SpaceSepList Space separated list of key exchange algorithms, ordered by preference
|
||||
--sftp-key-file string Path to PEM-encoded private key file
|
||||
@@ -877,6 +909,7 @@ rclone [flags]
|
||||
--smb-pass string SMB password (obscured)
|
||||
--smb-port int SMB port number (default 445)
|
||||
--smb-spn string Service principal name
|
||||
--smb-use-kerberos Use Kerberos authentication
|
||||
--smb-user string SMB username (default "$USER")
|
||||
--stats Duration Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable) (default 1m0s)
|
||||
--stats-file-name-length int Max file name length in stats (0 for no limit) (default 45)
|
||||
@@ -965,7 +998,7 @@ rclone [flags]
|
||||
--use-json-log Use json log format
|
||||
--use-mmap Use mmap allocator (see docs)
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.69.0")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.70.3")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
-V, --version Print the version number
|
||||
--webdav-auth-redirect Preserve authentication on redirect
|
||||
@@ -1017,6 +1050,7 @@ rclone [flags]
|
||||
* [rclone cleanup](/commands/rclone_cleanup/) - Clean up the remote if possible.
|
||||
* [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell.
|
||||
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
* [rclone convmv](/commands/rclone_convmv/) - Convert file and directory names in place.
|
||||
* [rclone copy](/commands/rclone_copy/) - Copy files from source to dest, skipping identical files.
|
||||
* [rclone copyto](/commands/rclone_copyto/) - Copy files from source to dest, skipping identical files.
|
||||
* [rclone copyurl](/commands/rclone_copyurl/) - Copy the contents of the URL supplied content to dest:path.
|
||||
|
||||
@@ -14,13 +14,18 @@ Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.

The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.

```
rclone authorize [flags]
rclone authorize <fs name> [base64_json_blob | client_id client_secret] [flags]
```

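For illustration, the three accepted argument forms might look like this (the remote type, blob, and credentials below are placeholders, not real values):

```
rclone authorize "drive"
rclone authorize "drive" "eyJ0b2tlbiI6..."
rclone authorize "drive" "my_client_id" "my_client_secret"
```
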
## Options
|
||||
|
||||
@@ -93,6 +93,7 @@ Flags for anything which can copy a file
|
||||
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 256Mi)
|
||||
--multi-thread-streams int Number of streams to use for multi-thread downloads (default 4)
|
||||
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
|
||||
--name-transform stringArray Transform paths during the copy process
|
||||
--no-check-dest Don't check the destination, copy regardless
|
||||
--no-traverse Don't traverse destination file system on copy
|
||||
--no-update-dir-modtime Don't update directory modification times
|
||||
@@ -129,6 +130,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -74,6 +74,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -96,6 +96,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -82,6 +82,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -123,6 +123,7 @@ rclone config create name type [key value]* [flags]
|
||||
--continue Continue the configuration process with an answer
|
||||
-h, --help help for create
|
||||
--no-obscure Force any passwords not to be obscured
|
||||
--no-output Don't provide any output
|
||||
--non-interactive Don't interact with user and return questions
|
||||
--obscure Force any passwords to be obscured
|
||||
--result string Result - use with --continue
|
||||
|
||||
@@ -21,12 +21,12 @@ password to re-encrypt the config.
|
||||
|
||||
When `--password-command` is called to change the password then the
|
||||
environment variable `RCLONE_PASSWORD_CHANGE=1` will be set. So if
|
||||
changing passwords programatically you can use the environment
|
||||
changing passwords programmatically you can use the environment
|
||||
variable to distinguish which password you must supply.
|
||||
|
||||
Alternatively you can remove the password first (with `rclone config
|
||||
encryption remove`), then set it again with this command which may be
|
||||
easier if you don't mind the unecrypted config file being on the disk
|
||||
easier if you don't mind the unencrypted config file being on the disk
|
||||
briefly.
|
||||
|
||||
|
||||
|
||||
@@ -123,6 +123,7 @@ rclone config update name [key value]+ [flags]
|
||||
--continue Continue the configuration process with an answer
|
||||
-h, --help help for update
|
||||
--no-obscure Force any passwords not to be obscured
|
||||
--no-output Don't provide any output
|
||||
--non-interactive Don't interact with user and return questions
|
||||
--obscure Force any passwords to be obscured
|
||||
--result string Result - use with --continue
|
||||
|
||||
docs/content/commands/rclone_convmv.md (new file, 398 lines)
@@ -0,0 +1,398 @@
|
||||
---
|
||||
title: "rclone convmv"
|
||||
description: "Convert file and directory names in place."
|
||||
versionIntroduced: v1.70
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/convmv/ and as part of making a release run "make commanddocs"
|
||||
---
|
||||
# rclone convmv
|
||||
|
||||
Convert file and directory names in place.
|
||||
|
||||
## Synopsis
|
||||
|
||||
|
||||
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
|
||||
|
||||
| Command | Description |
|
||||
|------|------|
|
||||
| `--name-transform prefix=XXXX` | Prepends XXXX to the file name. |
|
||||
| `--name-transform suffix=XXXX` | Appends XXXX to the file name after the extension. |
|
||||
| `--name-transform suffix_keep_extension=XXXX` | Appends XXXX to the file name while preserving the original file extension. |
|
||||
| `--name-transform trimprefix=XXXX` | Removes XXXX if it appears at the start of the file name. |
|
||||
| `--name-transform trimsuffix=XXXX` | Removes XXXX if it appears at the end of the file name. |
|
||||
| `--name-transform regex=/pattern/replacement/` | Applies a regex-based transformation. |
|
||||
| `--name-transform replace=old:new` | Replaces occurrences of old with new in the file name. |
|
||||
| `--name-transform date={YYYYMMDD}` | Appends or prefixes the specified date format. |
|
||||
| `--name-transform truncate=N` | Truncates the file name to a maximum of N characters. |
|
||||
| `--name-transform base64encode` | Encodes the file name in Base64. |
|
||||
| `--name-transform base64decode` | Decodes a Base64-encoded file name. |
|
||||
| `--name-transform encoder=ENCODING` | Converts the file name to the specified encoding (e.g., ISO-8859-1, Windows-1252, Macintosh). |
|
||||
| `--name-transform decoder=ENCODING` | Decodes the file name from the specified encoding. |
|
||||
| `--name-transform charmap=MAP` | Applies a character mapping transformation. |
|
||||
| `--name-transform lowercase` | Converts the file name to lowercase. |
|
||||
| `--name-transform uppercase` | Converts the file name to UPPERCASE. |
|
||||
| `--name-transform titlecase` | Converts the file name to Title Case. |
|
||||
| `--name-transform ascii` | Strips non-ASCII characters. |
|
||||
| `--name-transform url` | URL-encodes the file name. |
|
||||
| `--name-transform nfc` | Converts the file name to NFC Unicode normalization form. |
|
||||
| `--name-transform nfd` | Converts the file name to NFD Unicode normalization form. |
|
||||
| `--name-transform nfkc` | Converts the file name to NFKC Unicode normalization form. |
|
||||
| `--name-transform nfkd` | Converts the file name to NFKD Unicode normalization form. |
|
||||
| `--name-transform command=/path/to/my/program` | Executes an external program to transform file names. |
|
||||
|
||||
|
||||
Conversion modes:
|
||||
```
|
||||
none
|
||||
nfc
|
||||
nfd
|
||||
nfkc
|
||||
nfkd
|
||||
replace
|
||||
prefix
|
||||
suffix
|
||||
suffix_keep_extension
|
||||
trimprefix
|
||||
trimsuffix
|
||||
index
|
||||
date
|
||||
truncate
|
||||
base64encode
|
||||
base64decode
|
||||
encoder
|
||||
decoder
|
||||
ISO-8859-1
|
||||
Windows-1252
|
||||
Macintosh
|
||||
charmap
|
||||
lowercase
|
||||
uppercase
|
||||
titlecase
|
||||
ascii
|
||||
url
|
||||
regex
|
||||
command
|
||||
```
|
||||
Char maps:
|
||||
```
|
||||
|
||||
IBM-Code-Page-037
|
||||
IBM-Code-Page-437
|
||||
IBM-Code-Page-850
|
||||
IBM-Code-Page-852
|
||||
IBM-Code-Page-855
|
||||
Windows-Code-Page-858
|
||||
IBM-Code-Page-860
|
||||
IBM-Code-Page-862
|
||||
IBM-Code-Page-863
|
||||
IBM-Code-Page-865
|
||||
IBM-Code-Page-866
|
||||
IBM-Code-Page-1047
|
||||
IBM-Code-Page-1140
|
||||
ISO-8859-1
|
||||
ISO-8859-2
|
||||
ISO-8859-3
|
||||
ISO-8859-4
|
||||
ISO-8859-5
|
||||
ISO-8859-6
|
||||
ISO-8859-7
|
||||
ISO-8859-8
|
||||
ISO-8859-9
|
||||
ISO-8859-10
|
||||
ISO-8859-13
|
||||
ISO-8859-14
|
||||
ISO-8859-15
|
||||
ISO-8859-16
|
||||
KOI8-R
|
||||
KOI8-U
|
||||
Macintosh
|
||||
Macintosh-Cyrillic
|
||||
Windows-874
|
||||
Windows-1250
|
||||
Windows-1251
|
||||
Windows-1252
|
||||
Windows-1253
|
||||
Windows-1254
|
||||
Windows-1255
|
||||
Windows-1256
|
||||
Windows-1257
|
||||
Windows-1258
|
||||
X-User-Defined
|
||||
```
|
||||
Encoding masks:
|
||||
```
|
||||
Asterisk
|
||||
BackQuote
|
||||
BackSlash
|
||||
Colon
|
||||
CrLf
|
||||
Ctl
|
||||
Del
|
||||
Dollar
|
||||
Dot
|
||||
DoubleQuote
|
||||
Exclamation
|
||||
Hash
|
||||
InvalidUtf8
|
||||
LeftCrLfHtVt
|
||||
LeftPeriod
|
||||
LeftSpace
|
||||
LeftTilde
|
||||
LtGt
|
||||
None
|
||||
Percent
|
||||
Pipe
|
||||
Question
|
||||
Raw
|
||||
RightCrLfHtVt
|
||||
RightPeriod
|
||||
RightSpace
|
||||
Semicolon
|
||||
SingleQuote
|
||||
Slash
|
||||
SquareBracket
|
||||
```
|
||||
Examples:
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,uppercase"
|
||||
// Output: STORIES/THE QUICK BROWN FOX!.TXT
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,replace=Fox:Turtle" --name-transform "all,replace=Quick:Slow"
|
||||
// Output: stories/The Slow Brown Turtle!.txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,base64encode"
|
||||
// Output: c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0" --name-transform "all,base64decode"
|
||||
// Output: stories/The Quick Brown Fox!.txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,nfc"
|
||||
// Output: stories/The Quick Brown 🦊 Fox Went to the Café!.txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,nfd"
|
||||
// Output: stories/The Quick Brown 🦊 Fox Went to the Café!.txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown 🦊 Fox!.txt" --name-transform "all,ascii"
|
||||
// Output: stories/The Quick Brown Fox!.txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,trimsuffix=.txt"
|
||||
// Output: stories/The Quick Brown Fox!
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,prefix=OLD_"
|
||||
// Output: OLD_stories/OLD_The Quick Brown Fox!.txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,charmap=ISO-8859-7"
|
||||
// Output: stories/The Quick Brown _ Fox Went to the Caf_!.txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox: A Memoir [draft].txt" --name-transform "all,encoder=Colon,SquareBracket"
|
||||
// Output: stories/The Quick Brown Fox: A Memoir [draft].txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown 🦊 Fox Went to the Café!.txt" --name-transform "all,truncate=21"
|
||||
// Output: stories/The Quick Brown 🦊 Fox
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,command=echo"
|
||||
// Output: stories/The Quick Brown Fox!.txt
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!" --name-transform "date=-{YYYYMMDD}"
|
||||
// Output: stories/The Quick Brown Fox!-20250618
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!" --name-transform "date=-{macfriendlytime}"
|
||||
// Output: stories/The Quick Brown Fox!-2025-06-18 0148PM
|
||||
```
|
||||
|
||||
```
|
||||
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,regex=[\\.\\w]/ab"
|
||||
// Output: ababababababab/ababab ababababab ababababab ababab!abababab
|
||||
```
|
||||
|
||||
Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
|
||||
|
||||
The `--name-transform` flag is also available in `sync`, `copy`, and `move`.
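
As an illustrative sketch (the remote paths here are placeholders), the flag works the same way when passed to one of those commands:

```
rclone copy source:path dest:path --name-transform "file,suffix_keep_extension=_v2"
```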
|
||||
|
||||
# Files vs Directories

By default `--name-transform` will only apply to file names. This means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just to directories.
To choose which part of the file path is affected some tags can be added to the `--name-transform`.

| Tag | Effect |
|------|------|
| `file` | Only transform the leaf name of files (DEFAULT) |
| `dir` | Only transform the names of directories - these may appear anywhere in the path |
| `all` | Transform the entire path for files and directories |

This is used by adding the tag into the transform name like this: `--name-transform file,prefix=ABC` or `--name-transform dir,prefix=DEF`.

For some conversions using `all` is more likely to be useful, for example `--name-transform all,nfc`.

Note that `--name-transform` may not add path separators `/` to the name. This will cause an error.
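
For example, applying different tags to the same path (an illustrative sketch; the output shown follows from the tag semantics described above rather than from a captured run):

```
rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "dir,prefix=DIR_" --name-transform "file,prefix=FILE_"
// Output: DIR_stories/FILE_The Quick Brown Fox!.txt
```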
|
||||
|
||||
# Ordering and Conflicts

* Transformations will be applied in the order specified by the user.
* If the `file` tag is in use (the default) then only the leaf name of files will be transformed.
* If the `dir` tag is in use then directories anywhere in the path will be transformed.
* If the `all` tag is in use then directories and files anywhere in the path will be transformed.
* Each transformation will be run one path segment at a time.
* If a transformation adds a `/` or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as `prefix` followed by `trimprefix` or `nfc` followed by `nfd`, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify transformations using `--dry-run` before execution.
|
||||
|
||||
# Race Conditions and Non-Deterministic Behavior

Some transformations, such as `replace=old:new`, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running `rclone check` after a sync using such transformations may erroneously report missing or differing files due to overwritten results.

To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use `--dry-run` to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with `--transfers=1` if necessary.
* Certain transformations (e.g. `prefix`) will have a multiplying effect every time they are used. Avoid these when using `bisync`.
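
As a sketch of that workflow (the remote path and pattern here are placeholders), a conflict-prone transform can be previewed before it is applied:

```
rclone convmv remote:backups --name-transform "file,replace=2024:2025" --dry-run -v
```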
|
||||
|
||||
|
||||
|
||||
```
|
||||
rclone convmv dest:path --name-transform XXX [flags]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
```
|
||||
--create-empty-src-dirs Create empty source dirs on destination after move
|
||||
--delete-empty-src-dirs Delete empty source dirs after move
|
||||
-h, --help help for convmv
|
||||
```
|
||||
|
||||
Options shared with other commands are described next.
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
### Copy Options
|
||||
|
||||
Flags for anything which can copy a file
|
||||
|
||||
```
|
||||
--check-first Do all the checks before starting transfers
|
||||
-c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
|
||||
--compare-dest stringArray Include additional server-side paths during comparison
|
||||
--copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
|
||||
--cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD)
|
||||
--ignore-case-sync Ignore case when synchronizing
|
||||
--ignore-checksum Skip post copy check of checksums
|
||||
--ignore-existing Skip all files that exist on destination
|
||||
--ignore-size Ignore size when skipping use modtime or checksum
|
||||
-I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
|
||||
--immutable Do not modify files, fail if existing files have been modified
|
||||
--inplace Download directly to destination file instead of atomic download to temp/rename
|
||||
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
|
||||
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
|
||||
--max-duration Duration Maximum duration rclone will transfer data for (default 0s)
|
||||
--max-transfer SizeSuffix Maximum size of data to transfer (default off)
|
||||
-M, --metadata If set, preserve metadata when copying objects
|
||||
--modify-window Duration Max time diff to be considered the same (default 1ns)
|
||||
--multi-thread-chunk-size SizeSuffix Chunk size for multi-thread downloads / uploads, if not set by filesystem (default 64Mi)
|
||||
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 256Mi)
|
||||
--multi-thread-streams int Number of streams to use for multi-thread downloads (default 4)
|
||||
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
|
||||
--name-transform stringArray Transform paths during the copy process
|
||||
--no-check-dest Don't check the destination, copy regardless
|
||||
--no-traverse Don't traverse destination file system on copy
|
||||
--no-update-dir-modtime Don't update directory modification times
|
||||
--no-update-modtime Don't update destination modtime if files identical
|
||||
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
|
||||
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
|
||||
--refresh-times Refresh the modtime of remote files
|
||||
--server-side-across-configs Allow server-side operations (e.g. copy) to work across different configs
|
||||
--size-only Skip based on size only, not modtime or checksum
|
||||
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown, upload starts after reaching cutoff or when file ends (default 100Ki)
|
||||
-u, --update Skip files that are newer on the destination
|
||||
```
|
||||
|
||||
### Important Options
|
||||
|
||||
Important flags useful for most commands
|
||||
|
||||
```
|
||||
-n, --dry-run Do a trial run with no permanent changes
|
||||
-i, --interactive Enable interactive mode
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
```
|
||||
|
||||
### Filter Options
|
||||
|
||||
Flags for filtering directory listings
|
||||
|
||||
```
|
||||
--delete-excluded Delete files on dest excluded from sync
|
||||
--exclude stringArray Exclude files matching pattern
|
||||
--exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
|
||||
--exclude-if-present stringArray Exclude directories if filename is present
|
||||
--files-from stringArray Read list of source-file names from file (use - to read from stdin)
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
--max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--max-depth int If set limits the recursion depth to this (default -1)
|
||||
--max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
|
||||
--metadata-exclude stringArray Exclude metadatas matching pattern
|
||||
--metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
|
||||
--metadata-filter stringArray Add a metadata filtering rule
|
||||
--metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
|
||||
--metadata-include stringArray Include metadatas matching pattern
|
||||
--metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
|
||||
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
|
||||
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
|
||||
```
|
||||
|
||||
### Listing Options
|
||||
|
||||
Flags for listing directories
|
||||
|
||||
```
|
||||
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
|
||||
--fast-list Use recursive list if available; uses more memory but fewer transactions
|
||||
```
|
||||
|
||||
## See Also
|
||||
|
||||
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
|
||||
|
||||
@@ -116,6 +116,7 @@ Flags for anything which can copy a file
|
||||
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 256Mi)
|
||||
--multi-thread-streams int Number of streams to use for multi-thread downloads (default 4)
|
||||
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
|
||||
--name-transform stringArray Transform paths during the copy process
|
||||
--no-check-dest Don't check the destination, copy regardless
|
||||
--no-traverse Don't traverse destination file system on copy
|
||||
--no-update-dir-modtime Don't update directory modification times
|
||||
@@ -152,6 +153,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -36,6 +36,8 @@ This doesn't transfer files that are identical on src and dst, testing
|
||||
by size and modification time or MD5SUM. It doesn't delete files from
|
||||
the destination.
|
||||
|
||||
*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
|
||||
|
||||
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
|
||||
|
||||
|
||||
@@ -79,6 +81,7 @@ Flags for anything which can copy a file
|
||||
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 256Mi)
|
||||
--multi-thread-streams int Number of streams to use for multi-thread downloads (default 4)
|
||||
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
|
||||
--name-transform stringArray Transform paths during the copy process
|
||||
--no-check-dest Don't check the destination, copy regardless
|
||||
--no-traverse Don't traverse destination file system on copy
|
||||
--no-update-dir-modtime Don't update directory modification times
|
||||
@@ -115,6 +118,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -17,7 +17,7 @@ Setting `--auto-filename` will attempt to automatically determine the
|
||||
filename from the URL (after any redirections) and used in the
|
||||
destination path.
|
||||
|
||||
With `--auto-filename-header` in addition, if a specific filename is
|
||||
With `--header-filename` in addition, if a specific filename is
|
||||
set in HTTP headers, it will be used instead of the name from the URL.
|
||||
With `--print-filename` in addition, the resulting file name will be
|
||||
printed.
|
||||
@@ -28,7 +28,7 @@ destination if there is one with the same name.
|
||||
Setting `--stdout` or making the output file name `-`
|
||||
will cause the output to be written to standard output.
|
||||
|
||||
## Troublshooting
|
||||
## Troubleshooting
|
||||
|
||||
If you can't get `rclone copyurl` to work then here are some things you can try:
|
||||
|
||||
|
||||
@@ -99,6 +99,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -75,6 +75,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -36,6 +36,7 @@ Run without a hash to see the list of all supported hashes, e.g.
|
||||
* whirlpool
|
||||
* crc32
|
||||
* sha256
|
||||
* sha512
|
||||
|
||||
Then
|
||||
|
||||
@@ -74,6 +75,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -70,6 +70,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -81,6 +81,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -178,6 +178,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -150,6 +150,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -71,6 +71,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -58,6 +58,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -571,11 +571,11 @@ seconds. If rclone is quit or dies with files that haven't been
|
||||
uploaded, these will be uploaded next time rclone is run with the same
|
||||
flags.
|
||||
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
|
||||
that the cache may exceed these quotas for two reasons. Firstly
|
||||
because it is only checked every `--vfs-cache-poll-interval`. Secondly
|
||||
because open files cannot be evicted from the cache. When
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
|
||||
rclone will attempt to evict the least accessed files from the cache
|
||||
first. rclone will start with files that haven't been accessed for the
|
||||
longest. This cache flushing strategy is efficient and more relevant
|
||||
@@ -900,6 +900,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## VFS Metadata

If you use the `--vfs-metadata-extension` flag you can get the VFS to
expose files which contain the [metadata](/docs/#metadata) as a JSON
blob. These files will not appear in the directory listing, but can be
`stat`-ed and opened and once they have been they **will** appear in
directory listings until the directory cache expires.

Note that some backends won't create metadata unless you pass in the
`--metadata` flag.

For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
we get

```
$ ls -l /mnt/
total 1048577
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G

$ cat /mnt/1G.metadata
{
    "atime": "2025-03-04T17:34:22.317069787Z",
    "btime": "2025-03-03T16:03:37.708253808Z",
    "gid": "1000",
    "mode": "100664",
    "mtime": "2025-03-03T16:03:39.640238323Z",
    "uid": "1000"
}

$ ls -l /mnt/
total 1048578
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
```

If the file has no metadata it will be returned as `{}` and if there
is an error reading the metadata the error will be returned as
`{"error":"error string"}`.
|
||||
|
||||
|
||||
|
||||
```
|
||||
@@ -951,6 +990,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
|
||||
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
|
||||
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
|
||||
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
|
||||
--vfs-metadata-extension string Set the extension to read metadata from
|
||||
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
|
||||
@@ -980,6 +1020,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -91,6 +91,7 @@ Flags for anything which can copy a file
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 256Mi)
--multi-thread-streams int Number of streams to use for multi-thread downloads (default 4)
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--name-transform stringArray Transform paths during the copy process
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
--no-update-dir-modtime Don't update directory modification times
@@ -127,6 +128,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -82,6 +82,7 @@ Flags for anything which can copy a file
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 256Mi)
--multi-thread-streams int Number of streams to use for multi-thread downloads (default 4)
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--name-transform stringArray Transform paths during the copy process
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
--no-update-dir-modtime Don't update directory modification times
@@ -118,6 +119,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -98,6 +98,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -572,11 +572,11 @@ seconds. If rclone is quit or dies with files that haven't been
uploaded, these will be uploaded next time rclone is run with the same
flags.

If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
that the cache may exceed these quotas for two reasons. Firstly
because it is only checked every `--vfs-cache-poll-interval`. Secondly
because open files cannot be evicted from the cache. When
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
rclone will attempt to evict the least accessed files from the cache
first. rclone will start with files that haven't been accessed for the
longest. This cache flushing strategy is efficient and more relevant
@@ -901,6 +901,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## VFS Metadata

If you use the `--vfs-metadata-extension` flag you can get the VFS to
expose files which contain the [metadata](/docs/#metadata) as a JSON
blob. These files will not appear in the directory listing, but can be
`stat`-ed and opened and once they have been they **will** appear in
directory listings until the directory cache expires.

Note that some backends won't create metadata unless you pass in the
`--metadata` flag.

For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
we get

```
$ ls -l /mnt/
total 1048577
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G

$ cat /mnt/1G.metadata
{
"atime": "2025-03-04T17:34:22.317069787Z",
"btime": "2025-03-03T16:03:37.708253808Z",
"gid": "1000",
"mode": "100664",
"mtime": "2025-03-03T16:03:39.640238323Z",
"uid": "1000"
}

$ ls -l /mnt/
total 1048578
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
```

If the file has no metadata it will be returned as `{}` and if there
is an error reading the metadata the error will be returned as
`{"error":"error string"}`.


```
@@ -957,6 +996,7 @@ rclone nfsmount remote:path /path/to/mountpoint [flags]
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
--vfs-metadata-extension string Set the extension to read metadata from
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
@@ -986,6 +1026,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -15,6 +15,9 @@ include/exclude filters - everything will be removed. Use the
delete files. To delete empty directories only, use command
[rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).

The concurrency of this operation is controlled by the `--checkers` global flag. However, some backends will
implement this command directly, in which case `--checkers` will be ignored.
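As an illustration (this hunk appears to come from the purge documentation, and the remote and path below are placeholders), a cautious first run might look like:

```
rclone purge remote:dir --checkers 16 --dry-run
```

`--dry-run` shows what would be removed, and `--checkers 16` raises the concurrency for backends that don't implement the operation directly.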

**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.

@@ -126,7 +126,11 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the `--rc-user` and `--rc-pass` flags.

If no static users are configured by either of the above methods, and client
Alternatively, you can have the reverse proxy manage authentication and use the
username provided in the configured header with `--user-from-header` (e.g., `--rc---user-from-header=x-remote-user`).
Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration may lead to unauthorized access.
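For example, with the remote control API listening on the default `localhost:5572` and started with `--rc-user-from-header=x-remote-user`, a request forwarded by the proxy might look like the following (a sketch only; in practice the header should be set by the trusted reverse proxy, not by the client):

```
curl -X POST -H "X-Remote-User: alice" http://127.0.0.1:5572/rc/noop
```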

If either of the above authentication methods is not configured and client
certificates are required by the `--client-ca` flag passed to the server, the
client certificate common name will be considered as the username.
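For example, assuming TLS is enabled on the server and a client CA has been configured (for the remote control API that would be `--rc-client-ca ca.crt`), and assuming `client.crt`/`client.key` were issued by that CA with a common name of `alice`, a request authenticating as `alice` could be made like this (sketch only):

```
# -k skips server certificate verification for this sketch; use --cacert in practice
curl -k -X POST --cert client.crt --key client.key https://127.0.0.1:5572/rc/noop
```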

@@ -190,6 +194,7 @@ Flags to control the Remote Control API
--rc-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
--rc-template string User-specified template
--rc-user string User name for authentication
--rc-user-from-header string User name from a defined HTTP header
--rc-web-fetch-url string URL to fetch the releases for webgui (default "https://api.github.com/repos/rclone/rclone-webui-react/releases/latest")
--rc-web-gui Launch WebGUI on localhost
--rc-web-gui-force-update Force update to latest version of web gui

@@ -134,11 +134,11 @@ seconds. If rclone is quit or dies with files that haven't been
uploaded, these will be uploaded next time rclone is run with the same
flags.

If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
that the cache may exceed these quotas for two reasons. Firstly
because it is only checked every `--vfs-cache-poll-interval`. Secondly
because open files cannot be evicted from the cache. When
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
rclone will attempt to evict the least accessed files from the cache
first. rclone will start with files that haven't been accessed for the
longest. This cache flushing strategy is efficient and more relevant
@@ -463,6 +463,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## VFS Metadata

If you use the `--vfs-metadata-extension` flag you can get the VFS to
expose files which contain the [metadata](/docs/#metadata) as a JSON
blob. These files will not appear in the directory listing, but can be
`stat`-ed and opened and once they have been they **will** appear in
directory listings until the directory cache expires.

Note that some backends won't create metadata unless you pass in the
`--metadata` flag.

For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
we get

```
$ ls -l /mnt/
total 1048577
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G

$ cat /mnt/1G.metadata
{
"atime": "2025-03-04T17:34:22.317069787Z",
"btime": "2025-03-03T16:03:37.708253808Z",
"gid": "1000",
"mode": "100664",
"mtime": "2025-03-03T16:03:39.640238323Z",
"uid": "1000"
}

$ ls -l /mnt/
total 1048578
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
```

If the file has no metadata it will be returned as `{}` and if there
is an error reading the metadata the error will be returned as
`{"error":"error string"}`.


```
@@ -500,6 +539,7 @@ rclone serve dlna remote:path [flags]
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
--vfs-metadata-extension string Set the extension to read metadata from
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
@@ -527,6 +567,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -146,11 +146,11 @@ seconds. If rclone is quit or dies with files that haven't been
uploaded, these will be uploaded next time rclone is run with the same
flags.

If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
that the cache may exceed these quotas for two reasons. Firstly
because it is only checked every `--vfs-cache-poll-interval`. Secondly
because open files cannot be evicted from the cache. When
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
rclone will attempt to evict the least accessed files from the cache
first. rclone will start with files that haven't been accessed for the
longest. This cache flushing strategy is efficient and more relevant
@@ -475,6 +475,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## VFS Metadata

If you use the `--vfs-metadata-extension` flag you can get the VFS to
expose files which contain the [metadata](/docs/#metadata) as a JSON
blob. These files will not appear in the directory listing, but can be
`stat`-ed and opened and once they have been they **will** appear in
directory listings until the directory cache expires.

Note that some backends won't create metadata unless you pass in the
`--metadata` flag.

For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
we get

```
$ ls -l /mnt/
total 1048577
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G

$ cat /mnt/1G.metadata
{
"atime": "2025-03-04T17:34:22.317069787Z",
"btime": "2025-03-03T16:03:37.708253808Z",
"gid": "1000",
"mode": "100664",
"mtime": "2025-03-03T16:03:39.640238323Z",
"uid": "1000"
}

$ ls -l /mnt/
total 1048578
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
```

If the file has no metadata it will be returned as `{}` and if there
is an error reading the metadata the error will be returned as
`{"error":"error string"}`.


```
@@ -531,6 +570,7 @@ rclone serve docker [flags]
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
--vfs-metadata-extension string Set the extension to read metadata from
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
@@ -560,6 +600,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -127,11 +127,11 @@ seconds. If rclone is quit or dies with files that haven't been
uploaded, these will be uploaded next time rclone is run with the same
flags.

If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
that the cache may exceed these quotas for two reasons. Firstly
because it is only checked every `--vfs-cache-poll-interval`. Secondly
because open files cannot be evicted from the cache. When
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
rclone will attempt to evict the least accessed files from the cache
first. rclone will start with files that haven't been accessed for the
longest. This cache flushing strategy is efficient and more relevant
@@ -456,6 +456,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.

## VFS Metadata

If you use the `--vfs-metadata-extension` flag you can get the VFS to
expose files which contain the [metadata](/docs/#metadata) as a JSON
blob. These files will not appear in the directory listing, but can be
`stat`-ed and opened and once they have been they **will** appear in
directory listings until the directory cache expires.

Note that some backends won't create metadata unless you pass in the
`--metadata` flag.

For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
we get

```
$ ls -l /mnt/
total 1048577
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G

$ cat /mnt/1G.metadata
{
"atime": "2025-03-04T17:34:22.317069787Z",
"btime": "2025-03-03T16:03:37.708253808Z",
"gid": "1000",
"mode": "100664",
"mtime": "2025-03-03T16:03:39.640238323Z",
"uid": "1000"
}

$ ls -l /mnt/
total 1048578
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
```

If the file has no metadata it will be returned as `{}` and if there
is an error reading the metadata the error will be returned as
`{"error":"error string"}`.

## Auth Proxy

If you supply the parameter `--auth-proxy /path/to/program` then
@@ -577,6 +616,7 @@ rclone serve ftp remote:path [flags]
|
||||
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
|
||||
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
|
||||
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
|
||||
--vfs-metadata-extension string Set the extension to read metadata from
|
||||
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
|
||||
@@ -604,6 +644,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -128,7 +128,11 @@ By default this will serve files without needing a login.
|
||||
You can either use an htpasswd file which can take lots of users, or
|
||||
set a single username and password with the `--user` and `--pass` flags.
|
||||
|
||||
If no static users are configured by either of the above methods, and client
|
||||
Alternatively, you can have the reverse proxy manage authentication and use the
|
||||
username provided in the configured header with `--user-from-header` (e.g., `----user-from-header=x-remote-user`).
|
||||
Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration may lead to unauthorized access.
|
||||
|
||||
If either of the above authentication methods is not configured and client
|
||||
certificates are required by the `--client-ca` flag passed to the server, the
|
||||
client certificate common name will be considered as the username.
|
||||
|
||||
@@ -245,11 +249,11 @@ seconds. If rclone is quit or dies with files that haven't been
|
||||
uploaded, these will be uploaded next time rclone is run with the same
|
||||
flags.
|
||||
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
|
||||
that the cache may exceed these quotas for two reasons. Firstly
|
||||
because it is only checked every `--vfs-cache-poll-interval`. Secondly
|
||||
because open files cannot be evicted from the cache. When
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
|
||||
rclone will attempt to evict the least accessed files from the cache
|
||||
first. rclone will start with files that haven't been accessed for the
|
||||
longest. This cache flushing strategy is efficient and more relevant
|
||||
@@ -574,6 +578,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
## VFS Metadata
|
||||
|
||||
If you use the `--vfs-metadata-extension` flag you can get the VFS to
|
||||
expose files which contain the [metadata](/docs/#metadata) as a JSON
|
||||
blob. These files will not appear in the directory listing, but can be
|
||||
`stat`-ed and opened and once they have been they **will** appear in
|
||||
directory listings until the directory cache expires.
|
||||
|
||||
Note that some backends won't create metadata unless you pass in the
|
||||
`--metadata` flag.
|
||||
|
||||
For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
|
||||
we get
|
||||
|
||||
```
|
||||
$ ls -l /mnt/
|
||||
total 1048577
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
|
||||
$ cat /mnt/1G.metadata
|
||||
{
|
||||
"atime": "2025-03-04T17:34:22.317069787Z",
|
||||
"btime": "2025-03-03T16:03:37.708253808Z",
|
||||
"gid": "1000",
|
||||
"mode": "100664",
|
||||
"mtime": "2025-03-03T16:03:39.640238323Z",
|
||||
"uid": "1000"
|
||||
}
|
||||
|
||||
$ ls -l /mnt/
|
||||
total 1048578
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
|
||||
```
|
||||
|
||||
If the file has no metadata it will be returned as `{}` and if there
|
||||
is an error reading the metadata the error will be returned as
|
||||
`{"error":"error string"}`.
|
||||
|
||||
## Auth Proxy
|
||||
|
||||
If you supply the parameter `--auth-proxy /path/to/program` then
|
||||
@@ -664,19 +707,19 @@ rclone serve http remote:path [flags]
|
||||
## Options
|
||||
|
||||
```
|
||||
--addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
|
||||
--addr stringArray IPaddress:Port or :Port to bind server to (default 127.0.0.1:8080)
|
||||
--allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--auth-proxy string A program to use to create the backend from the auth
|
||||
--baseurl string Prefix for URLs - leave blank for root
|
||||
--cert string Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
|
||||
--client-ca string Path to TLS PEM CA file with certificate authorities to verify clients with
|
||||
--cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
--client-ca string Client certificate authority to verify clients with
|
||||
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
|
||||
--dir-perms FileMode Directory permissions (default 777)
|
||||
--file-perms FileMode File permissions (default 666)
|
||||
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
|
||||
-h, --help help for http
|
||||
--htpasswd string A htpasswd file - if not provided no authentication is done
|
||||
--key string Path to TLS PEM private key file
|
||||
--key string TLS PEM Private key
|
||||
--link-perms FileMode Link permissions (default 666)
|
||||
--max-header-bytes int Maximum size of request header (default 4096)
|
||||
--min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
|
||||
@@ -694,6 +737,7 @@ rclone serve http remote:path [flags]
|
||||
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
|
||||
--umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002)
|
||||
--user string User name for authentication
|
||||
--user-from-header string User name from a defined HTTP header
|
||||
--vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
|
||||
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
|
||||
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
|
||||
@@ -704,6 +748,7 @@ rclone serve http remote:path [flags]
|
||||
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
|
||||
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
|
||||
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
|
||||
--vfs-metadata-extension string Set the extension to read metadata from
|
||||
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
|
||||
@@ -731,6 +776,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -7,8 +7,6 @@ versionIntroduced: v1.65
---
# rclone serve nfs

*Not available in Windows.*

Serve the remote as an NFS mount

## Synopsis
@@ -55,7 +53,7 @@ that it uses an on disk cache, but the cache entries are held as
symlinks. Rclone will use the handle of the underlying file as the NFS
handle which improves performance. This sort of cache can't be backed
up and restored as the underlying handles will change. This is Linux
only. It requres running rclone as root or with `CAP_DAC_READ_SEARCH`.
only. It requires running rclone as root or with `CAP_DAC_READ_SEARCH`.
You can run rclone with this extra permission by doing this to the
rclone binary `sudo setcap cap_dac_read_search+ep /path/to/rclone`.

@@ -79,6 +77,12 @@ Where `$PORT` is the same port number used in the `serve nfs` command
and `$HOSTNAME` is the network address of the machine that `serve nfs`
was run on.

If `--vfs-metadata-extension` is in use then for the `--nfs-cache-type disk`
and `--nfs-cache-type cache` the metadata files will have the file
handle of their parent file suffixed with `0x00, 0x00, 0x00, 0x01`.
This means they can be looked up directly from the parent file handle
if desired.
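As a rough illustration of the handle layout (the parent handle below is a made-up hex value, not one rclone will actually produce):

```
# the metadata file's handle is the parent file's handle with 00 00 00 01 appended
parent=0102030405060708
echo "${parent}00000001"
```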

This command is only available on Unix platforms.

## VFS - Virtual File System
@@ -178,11 +182,11 @@ seconds. If rclone is quit or dies with files that haven't been
|
||||
uploaded, these will be uploaded next time rclone is run with the same
|
||||
flags.
|
||||
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
|
||||
that the cache may exceed these quotas for two reasons. Firstly
|
||||
because it is only checked every `--vfs-cache-poll-interval`. Secondly
|
||||
because open files cannot be evicted from the cache. When
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
|
||||
rclone will attempt to evict the least accessed files from the cache
|
||||
first. rclone will start with files that haven't been accessed for the
|
||||
longest. This cache flushing strategy is efficient and more relevant
|
||||
@@ -507,6 +511,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
## VFS Metadata
|
||||
|
||||
If you use the `--vfs-metadata-extension` flag you can get the VFS to
|
||||
expose files which contain the [metadata](/docs/#metadata) as a JSON
|
||||
blob. These files will not appear in the directory listing, but can be
|
||||
`stat`-ed and opened and once they have been they **will** appear in
|
||||
directory listings until the directory cache expires.
|
||||
|
||||
Note that some backends won't create metadata unless you pass in the
|
||||
`--metadata` flag.
|
||||
|
||||
For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
|
||||
we get
|
||||
|
||||
```
|
||||
$ ls -l /mnt/
|
||||
total 1048577
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
|
||||
$ cat /mnt/1G.metadata
|
||||
{
|
||||
"atime": "2025-03-04T17:34:22.317069787Z",
|
||||
"btime": "2025-03-03T16:03:37.708253808Z",
|
||||
"gid": "1000",
|
||||
"mode": "100664",
|
||||
"mtime": "2025-03-03T16:03:39.640238323Z",
|
||||
"uid": "1000"
|
||||
}
|
||||
|
||||
$ ls -l /mnt/
|
||||
total 1048578
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
|
||||
```
|
||||
|
||||
If the file has no metadata it will be returned as `{}` and if there
|
||||
is an error reading the metadata the error will be returned as
|
||||
`{"error":"error string"}`.
|
||||
|
||||
|
||||
|
||||
```
|
||||
@@ -543,6 +586,7 @@ rclone serve nfs remote:path [flags]
|
||||
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
|
||||
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
|
||||
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
|
||||
--vfs-metadata-extension string Set the extension to read metadata from
|
||||
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
|
||||
@@ -570,6 +614,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -162,7 +162,11 @@ By default this will serve files without needing a login.
|
||||
You can either use an htpasswd file which can take lots of users, or
|
||||
set a single username and password with the `--user` and `--pass` flags.
|
||||
|
||||
If no static users are configured by either of the above methods, and client
|
||||
Alternatively, you can have the reverse proxy manage authentication and use the
|
||||
username provided in the configured header with `--user-from-header` (e.g., `----user-from-header=x-remote-user`).
|
||||
Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration may lead to unauthorized access.
|
||||
|
||||
If either of the above authentication methods is not configured and client
|
||||
certificates are required by the `--client-ca` flag passed to the server, the
|
||||
client certificate common name will be considered as the username.
|
||||
|
||||
@@ -191,16 +195,16 @@ rclone serve restic remote:path [flags]
|
||||
## Options
|
||||
|
||||
```
|
||||
--addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
|
||||
--addr stringArray IPaddress:Port or :Port to bind server to (default 127.0.0.1:8080)
|
||||
--allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--append-only Disallow deletion of repository data
|
||||
--baseurl string Prefix for URLs - leave blank for root
|
||||
--cache-objects Cache listed objects (default true)
|
||||
--cert string Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
|
||||
--client-ca string Path to TLS PEM CA file with certificate authorities to verify clients with
|
||||
--cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
--client-ca string Client certificate authority to verify clients with
|
||||
-h, --help help for restic
|
||||
--htpasswd string A htpasswd file - if not provided no authentication is done
|
||||
--key string Path to TLS PEM private key file
|
||||
--key string TLS PEM Private key
|
||||
--max-header-bytes int Maximum size of request header (default 4096)
|
||||
--min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
|
||||
--pass string Password for authentication
|
||||
@@ -211,6 +215,7 @@ rclone serve restic remote:path [flags]
|
||||
--server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
|
||||
--stdio Run an HTTP2 server on stdin/stdout
|
||||
--user string User name for authentication
|
||||
--user-from-header string User name from a defined HTTP header
|
||||
```
|
||||
|
||||
See the [global flags page](/flags/) for global options not listed here.
|
||||
|
||||
@@ -27,7 +27,7 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
access.

Please note that some clients may require HTTPS endpoints. See [the
SSL docs](#ssl-tls) for more information.
SSL docs](#tls-ssl) for more information.
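For instance, serving a remote over the S3 protocol with a single v4 key pair might look like this (a sketch only; the key values are placeholders matching the configuration example below, and `remote:path` stands in for whatever is being served):

```
rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
```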

This command uses the [VFS directory cache](#vfs-virtual-file-system).
All the functionality will work with `--vfs-cache-mode off`. Using
@@ -82,7 +82,7 @@ secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```

Note that setting `disable_multipart_uploads = true` is to work around
Note that setting `use_multipart_uploads = false` is to work around
[a bug](#bugs) which will be fixed in due course.

## Bugs
@@ -154,7 +154,11 @@ By default this will serve files without needing a login.
|
||||
You can either use an htpasswd file which can take lots of users, or
|
||||
set a single username and password with the `--user` and `--pass` flags.
|
||||
|
||||
If no static users are configured by either of the above methods, and client
|
||||
Alternatively, you can have the reverse proxy manage authentication and use the
|
||||
username provided in the configured header with `--user-from-header` (e.g., `----user-from-header=x-remote-user`).
|
||||
Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration may lead to unauthorized access.
|
||||
|
||||
If either of the above authentication methods is not configured and client
|
||||
certificates are required by the `--client-ca` flag passed to the server, the
|
||||
client certificate common name will be considered as the username.
|
||||
|
||||
@@ -334,11 +338,11 @@ seconds. If rclone is quit or dies with files that haven't been
|
||||
uploaded, these will be uploaded next time rclone is run with the same
|
||||
flags.
|
||||
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
|
||||
that the cache may exceed these quotas for two reasons. Firstly
|
||||
because it is only checked every `--vfs-cache-poll-interval`. Secondly
|
||||
because open files cannot be evicted from the cache. When
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
|
||||
rclone will attempt to evict the least accessed files from the cache
|
||||
first. rclone will start with files that haven't been accessed for the
|
||||
longest. This cache flushing strategy is efficient and more relevant
|
||||
@@ -663,6 +667,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
## VFS Metadata
|
||||
|
||||
If you use the `--vfs-metadata-extension` flag you can get the VFS to
|
||||
expose files which contain the [metadata](/docs/#metadata) as a JSON
|
||||
blob. These files will not appear in the directory listing, but can be
|
||||
`stat`-ed and opened and once they have been they **will** appear in
|
||||
directory listings until the directory cache expires.
|
||||
|
||||
Note that some backends won't create metadata unless you pass in the
|
||||
`--metadata` flag.
|
||||
|
||||
For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
|
||||
we get
|
||||
|
||||
```
|
||||
$ ls -l /mnt/
|
||||
total 1048577
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
|
||||
$ cat /mnt/1G.metadata
|
||||
{
|
||||
"atime": "2025-03-04T17:34:22.317069787Z",
|
||||
"btime": "2025-03-03T16:03:37.708253808Z",
|
||||
"gid": "1000",
|
||||
"mode": "100664",
|
||||
"mtime": "2025-03-03T16:03:39.640238323Z",
|
||||
"uid": "1000"
|
||||
}
|
||||
|
||||
$ ls -l /mnt/
|
||||
total 1048578
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
|
||||
```
|
||||
|
||||
If the file has no metadata it will be returned as `{}` and if there
|
||||
is an error reading the metadata the error will be returned as
|
||||
`{"error":"error string"}`.
|
||||
|
||||
|
||||
|
||||
```
|
||||
@@ -672,22 +715,22 @@ rclone serve s3 remote:path [flags]
|
||||
## Options
|
||||
|
||||
```
|
||||
--addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
|
||||
--addr stringArray IPaddress:Port or :Port to bind server to (default 127.0.0.1:8080)
|
||||
--allow-origin string Origin which cross-domain request (CORS) can be executed from
|
||||
--auth-key stringArray Set key pair for v4 authorization: access_key_id,secret_access_key
|
||||
--auth-proxy string A program to use to create the backend from the auth
|
||||
--baseurl string Prefix for URLs - leave blank for root
|
||||
--cert string Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
|
||||
--client-ca string Path to TLS PEM CA file with certificate authorities to verify clients with
|
||||
--cert string TLS PEM key (concatenation of certificate and CA certificate)
|
||||
--client-ca string Client certificate authority to verify clients with
|
||||
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
|
||||
--dir-perms FileMode Directory permissions (default 777)
|
||||
--etag-hash string Which hash to use for the ETag, or auto or blank for off (default "MD5")
|
||||
--file-perms FileMode File permissions (default 666)
|
||||
--force-path-style If true use path style access if false use virtual hosted style (default true) (default true)
|
||||
--force-path-style If true use path style access if false use virtual hosted style (default true)
|
||||
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
|
||||
-h, --help help for s3
|
||||
--htpasswd string A htpasswd file - if not provided no authentication is done
|
||||
--key string Path to TLS PEM private key file
|
||||
--key string TLS PEM Private key
|
||||
--link-perms FileMode Link permissions (default 666)
|
||||
--max-header-bytes int Maximum size of request header (default 4096)
|
||||
--min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
|
||||
@@ -705,6 +748,7 @@ rclone serve s3 remote:path [flags]
|
||||
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
|
||||
--umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002)
|
||||
--user string User name for authentication
|
||||
--user-from-header string User name from a defined HTTP header
|
||||
--vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
|
||||
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
|
||||
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
|
||||
@@ -715,6 +759,7 @@ rclone serve s3 remote:path [flags]
|
||||
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
|
||||
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
|
||||
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
|
||||
--vfs-metadata-extension string Set the extension to read metadata from
|
||||
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
|
||||
@@ -742,6 +787,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -170,11 +170,11 @@ seconds. If rclone is quit or dies with files that haven't been
|
||||
uploaded, these will be uploaded next time rclone is run with the same
|
||||
flags.
|
||||
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
|
||||
that the cache may exceed these quotas for two reasons. Firstly
|
||||
because it is only checked every `--vfs-cache-poll-interval`. Secondly
|
||||
because open files cannot be evicted from the cache. When
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
|
||||
rclone will attempt to evict the least accessed files from the cache
|
||||
first. rclone will start with files that haven't been accessed for the
|
||||
longest. This cache flushing strategy is efficient and more relevant
|
||||
@@ -499,6 +499,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
## VFS Metadata
|
||||
|
||||
If you use the `--vfs-metadata-extension` flag you can get the VFS to
|
||||
expose files which contain the [metadata](/docs/#metadata) as a JSON
|
||||
blob. These files will not appear in the directory listing, but can be
|
||||
`stat`-ed and opened and once they have been they **will** appear in
|
||||
directory listings until the directory cache expires.
|
||||
|
||||
Note that some backends won't create metadata unless you pass in the
|
||||
`--metadata` flag.
|
||||
|
||||
For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
|
||||
we get
|
||||
|
||||
```
|
||||
$ ls -l /mnt/
|
||||
total 1048577
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
|
||||
$ cat /mnt/1G.metadata
|
||||
{
|
||||
"atime": "2025-03-04T17:34:22.317069787Z",
|
||||
"btime": "2025-03-03T16:03:37.708253808Z",
|
||||
"gid": "1000",
|
||||
"mode": "100664",
|
||||
"mtime": "2025-03-03T16:03:39.640238323Z",
|
||||
"uid": "1000"
|
||||
}
|
||||
|
||||
$ ls -l /mnt/
|
||||
total 1048578
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
|
||||
```
|
||||
|
||||
If the file has no metadata it will be returned as `{}` and if there
|
||||
is an error reading the metadata the error will be returned as
|
||||
`{"error":"error string"}`.
|
||||
|
||||
## Auth Proxy
|
||||
|
||||
If you supply the parameter `--auth-proxy /path/to/program` then
|
||||
@@ -620,6 +659,7 @@ rclone serve sftp remote:path [flags]
|
||||
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
|
||||
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
|
||||
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
|
||||
--vfs-metadata-extension string Set the extension to read metadata from
|
||||
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
|
||||
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
|
||||
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
|
||||
@@ -647,6 +687,7 @@ Flags for filtering directory listings
|
||||
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
|
||||
-f, --filter stringArray Add a file filtering rule
|
||||
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
|
||||
--hash-filter string Partition filenames by hash k/n or randomly @/n
|
||||
--ignore-case Ignore case in filters (case insensitive)
|
||||
--include stringArray Include files matching pattern
|
||||
--include-from stringArray Read file include patterns from file (use - to read from stdin)
|
||||
|
||||
@@ -171,7 +171,11 @@ By default this will serve files without needing a login.
|
||||
You can either use an htpasswd file which can take lots of users, or
|
||||
set a single username and password with the `--user` and `--pass` flags.
|
||||
|
||||
If no static users are configured by either of the above methods, and client
|
||||
Alternatively, you can have the reverse proxy manage authentication and use the
|
||||
username provided in the configured header with `--user-from-header` (e.g., `----user-from-header=x-remote-user`).
|
||||
Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration may lead to unauthorized access.
|
||||
|
||||
If either of the above authentication methods is not configured and client
|
||||
certificates are required by the `--client-ca` flag passed to the server, the
|
||||
client certificate common name will be considered as the username.
|
||||
|
||||
@@ -288,11 +292,11 @@ seconds. If rclone is quit or dies with files that haven't been
|
||||
uploaded, these will be uploaded next time rclone is run with the same
|
||||
flags.
|
||||
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-size` note
|
||||
If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
|
||||
that the cache may exceed these quotas for two reasons. Firstly
|
||||
because it is only checked every `--vfs-cache-poll-interval`. Secondly
|
||||
because open files cannot be evicted from the cache. When
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-size` is exceeded,
|
||||
`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
|
||||
rclone will attempt to evict the least accessed files from the cache
|
||||
first. rclone will start with files that haven't been accessed for the
|
||||
longest. This cache flushing strategy is efficient and more relevant
|
||||
@@ -617,6 +621,45 @@ _WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
|
||||
result is accurate. However, this is very inefficient and may cost lots of API
|
||||
calls resulting in extra charges. Use it as a last resort and only with caching.
|
||||
|
||||
## VFS Metadata
|
||||
|
||||
If you use the `--vfs-metadata-extension` flag you can get the VFS to
|
||||
expose files which contain the [metadata](/docs/#metadata) as a JSON
|
||||
blob. These files will not appear in the directory listing, but can be
|
||||
`stat`-ed and opened and once they have been they **will** appear in
|
||||
directory listings until the directory cache expires.
|
||||
|
||||
Note that some backends won't create metadata unless you pass in the
|
||||
`--metadata` flag.
|
||||
|
||||
For example, using `rclone mount` with `--metadata --vfs-metadata-extension .metadata`
|
||||
we get
|
||||
|
||||
```
|
||||
$ ls -l /mnt/
|
||||
total 1048577
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
|
||||
$ cat /mnt/1G.metadata
|
||||
{
|
||||
"atime": "2025-03-04T17:34:22.317069787Z",
|
||||
"btime": "2025-03-03T16:03:37.708253808Z",
|
||||
"gid": "1000",
|
||||
"mode": "100664",
|
||||
"mtime": "2025-03-03T16:03:39.640238323Z",
|
||||
"uid": "1000"
|
||||
}
|
||||
|
||||
$ ls -l /mnt/
|
||||
total 1048578
|
||||
-rw-rw-r-- 1 user user 1073741824 Mar 3 16:03 1G
|
||||
-rw-rw-r-- 1 user user 185 Mar 3 16:03 1G.metadata
|
||||
```
|
||||
|
||||
If the file has no metadata it will be returned as `{}` and if there
|
||||
is an error reading the metadata the error will be returned as
|
||||
`{"error":"error string"}`.
|
||||
|
||||
## Auth Proxy
|
||||
|
||||
If you supply the parameter `--auth-proxy /path/to/program` then
|
||||
@@ -707,12 +750,12 @@ rclone serve webdav remote:path [flags]
## Options

```
--addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
--addr stringArray IPaddress:Port or :Port to bind server to (default 127.0.0.1:8080)
--allow-origin string Origin which cross-domain request (CORS) can be executed from
--auth-proxy string A program to use to create the backend from the auth
--baseurl string Prefix for URLs - leave blank for root
--cert string Path to TLS PEM public key certificate file (can also include intermediate/CA certificates)
--client-ca string Path to TLS PEM CA file with certificate authorities to verify clients with
--cert string TLS PEM key (concatenation of certificate and CA certificate)
--client-ca string Client certificate authority to verify clients with
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
--dir-perms FileMode Directory permissions (default 777)
--disable-dir-list Disable HTML directory list on GET request for a directory
@@ -721,7 +764,7 @@ rclone serve webdav remote:path [flags]
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for webdav
--htpasswd string A htpasswd file - if not provided no authentication is done
--key string Path to TLS PEM private key file
--key string TLS PEM Private key
--link-perms FileMode Link permissions (default 666)
--max-header-bytes int Maximum size of request header (default 4096)
--min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
@@ -739,6 +782,7 @@ rclone serve webdav remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002)
--user string User name for authentication
--user-from-header string User name from a defined HTTP header
--vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
@@ -749,6 +793,7 @@ rclone serve webdav remote:path [flags]
--vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
--vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
--vfs-links Translate symlinks to/from regular files with a '.rclonelink' extension for the VFS
--vfs-metadata-extension string Set the extension to read metadata from
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
@@ -776,6 +821,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -61,6 +61,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -56,6 +56,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -147,6 +147,7 @@ Flags for anything which can copy a file
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size (default 256Mi)
--multi-thread-streams int Number of streams to use for multi-thread downloads (default 4)
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--name-transform stringArray Transform paths during the copy process
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
--no-update-dir-modtime Don't update directory modification times
@@ -171,6 +172,7 @@ Flags used for sync commands
--delete-during When synchronizing, delete files during transfer
--fix-case Force rename of case insensitive dest to match source
--ignore-errors Delete even if there are I/O errors
--list-cutoff int To save memory, sort directory listings on disk above this threshold (default 1000000)
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
--suffix string Suffix to add to changed files
@@ -202,6 +204,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

@@ -19,6 +19,7 @@ unless `--no-create` or `--recursive` is provided.
If `--recursive` is used then recursively sets the modification
time on all existing files that are found under the path. Filters are supported,
and you can test with the `--dry-run` or the `--interactive`/`-i` flag.
This will touch `--transfers` files concurrently.
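
As a usage illustration only (not shown in this diff), a recursive dry run over a placeholder remote could be written as:

```
rclone touch --recursive --dry-run remote:path
```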

If `--timestamp` is used then sets the modification time to that
time instead of the current time. Times may be specified as one of:
@@ -71,6 +72,7 @@ Flags for filtering directory listings
--files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
-f, --filter stringArray Add a file filtering rule
--filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
--hash-filter string Partition filenames by hash k/n or randomly @/n
--ignore-case Ignore case in filters (case insensitive)
--include stringArray Include files matching pattern
--include-from stringArray Read file include patterns from file (use - to read from stdin)

Some files were not shown because too many files have changed in this diff.