Mirror of https://github.com/rclone/rclone.git, synced 2026-01-22 12:23:15 +00:00
Compare commits

18 commits: fix-max-me...pr-8538-tr
| Author | SHA1 | Date |
|---|---|---|
|  | dbce609665 |  |
|  | 62e8512711 |  |
|  | 53bdd58085 |  |
|  | 7b9f8eca00 |  |
|  | 433ed18e91 |  |
|  | 34a20555ca |  |
|  | f20ee1488b |  |
|  | 3273bf3716 |  |
|  | f5501edfcf |  |
|  | 2404831725 |  |
|  | 9f0e237931 |  |
|  | f752eaa298 |  |
|  | 1f8373fae8 |  |
|  | b94f80b9d7 |  |
|  | 5f4e983ccb |  |
|  | 28b6f38135 |  |
|  | 6adb4056bb |  |
|  | 0b9671313b |  |
README.md (18 changed lines)
```diff
@@ -1,20 +1,4 @@
-<div align="center">
-    <sup>Special thanks to our sponsor:</sup>
-    <br>
-    <br>
-    <a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
-        <div>
-            <img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
-        </div>
-        <b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
-        <div>
-            <sup>Visit warp.dev to learn more.</sup>
-        </div>
-    </a>
-    <br>
-    <hr>
-</div>
-<br>
-
 [<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
 [<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
```
```diff
@@ -1745,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
     }
     var updateMetadata updateMetadataFn
     if len(metadata) > 0 {
-        updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
+        updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
         if err != nil {
             return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
         }
@@ -1776,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
     }
     dirID = actualID(dirID)
     updateInfo := &drive.File{}
-    updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
+    updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
     if err != nil {
         return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
     }
@@ -507,7 +507,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
 //
 // It returns a callback which should be called to finish the updates
 // after the data is uploaded.
-func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
+func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
     callbackFns := []updateMetadataFn{}
     callback = func(ctx context.Context, info *drive.File) error {
         for _, fn := range callbackFns {
@@ -532,7 +532,9 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
         }
         switch k {
         case "copy-requires-writer-permission":
-            if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
+            if isFolder {
+                fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
+            } else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
                 return nil, err
             }
         case "writers-can-share":
@@ -629,7 +631,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
     if err != nil {
         return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
     }
-    callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
+    callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
     if err != nil {
         return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
     }
```
```diff
@@ -10,6 +10,7 @@ import (
     "fmt"
     "io"
     iofs "io/fs"
+    "net/url"
     "os"
     "path"
     "regexp"
@@ -482,6 +483,14 @@ Example:

     myUser:myPass@localhost:9005
 `,
     Advanced: true,
+}, {
+    Name:    "http_proxy",
+    Default: "",
+    Help: `URL for HTTP CONNECT proxy
+
+Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
+`,
+    Advanced: true,
 }, {
     Name:    "copy_is_hardlink",
     Default: false,
@@ -545,6 +554,7 @@ type Options struct {
     HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
     SSH               fs.SpaceSepList `config:"ssh"`
     SocksProxy        string          `config:"socks_proxy"`
+    HTTPProxy         string          `config:"http_proxy"`
     CopyIsHardlink    bool            `config:"copy_is_hardlink"`
 }
@@ -570,6 +580,7 @@ type Fs struct {
     savedpswd string
     sessions  atomic.Int32 // count in use sessions
     tokens    *pacer.TokenDispenser
+    proxyURL  *url.URL // address of HTTP proxy read from environment
 }

 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -867,6 +878,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         opt.Port = "22"
     }

+    // get proxy URL if set
+    if opt.HTTPProxy != "" {
+        proxyURL, err := url.Parse(opt.HTTPProxy)
+        if err != nil {
+            return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
+        }
+        f.proxyURL = proxyURL
+    }
+
     sshConfig := &ssh.ClientConfig{
         User: opt.User,
         Auth: []ssh.AuthMethod{},
@@ -31,6 +31,8 @@ func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, ssh
     )
     if f.opt.SocksProxy != "" {
         conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
+    } else if f.proxyURL != nil {
+        conn, err = proxy.HTTPConnectDial(network, addr, f.proxyURL, baseDialer)
     } else {
         conn, err = baseDialer.Dial(network, addr)
     }
```
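The CONNECT handshake that `proxy.HTTPConnectDial` presumably performs on top of the base dialer is small enough to sketch. A standalone, illustrative version, assuming a plain unauthenticated HTTP proxy (the real helper in `lib/proxy` will handle more cases):

```go
package connectproxy

import (
    "bufio"
    "fmt"
    "net"
    "net/http"
)

// httpConnectDial opens a TCP connection to the proxy and asks it to tunnel
// to addr using the CONNECT verb, returning the tunnelled connection.
func httpConnectDial(proxyAddr, addr string) (net.Conn, error) {
    conn, err := net.Dial("tcp", proxyAddr)
    if err != nil {
        return nil, err
    }
    // Send the CONNECT request for the target address.
    fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
    // Read the proxy's response; 200 means the tunnel is established.
    resp, err := http.ReadResponse(bufio.NewReader(conn), &http.Request{Method: "CONNECT"})
    if err != nil {
        conn.Close()
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        conn.Close()
        return nil, fmt.Errorf("proxy refused CONNECT: %s", resp.Status)
    }
    return conn, nil // caller now speaks SSH (or anything else) through the tunnel
}
```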
backend/webdav/tus-errors.go (new file, 40 lines)
```go
package webdav

import (
    "errors"
    "fmt"
)

var (
    // ErrChunkSize is returned when the chunk size is zero
    ErrChunkSize = errors.New("tus chunk size must be greater than zero")
    // ErrNilLogger is returned when the logger is nil
    ErrNilLogger = errors.New("tus logger can't be nil")
    // ErrNilStore is returned when the store is nil
    ErrNilStore = errors.New("tus store can't be nil if resume is enabled")
    // ErrNilUpload is returned when the upload is nil
    ErrNilUpload = errors.New("tus upload can't be nil")
    // ErrLargeUpload is returned when the upload body is too large
    ErrLargeUpload = errors.New("tus upload body is too large")
    // ErrVersionMismatch is returned when the tus protocol versions mismatch
    ErrVersionMismatch = errors.New("tus protocol version mismatch")
    // ErrOffsetMismatch is returned when the tus upload offsets mismatch
    ErrOffsetMismatch = errors.New("tus upload offset mismatch")
    // ErrUploadNotFound is returned when the tus upload is not found
    ErrUploadNotFound = errors.New("tus upload not found")
    // ErrResumeNotEnabled is returned when tus resuming is not enabled
    ErrResumeNotEnabled = errors.New("tus resuming not enabled")
    // ErrFingerprintNotSet is returned when the tus fingerprint is not set
    ErrFingerprintNotSet = errors.New("tus fingerprint not set")
)

// ClientError represents an error state of a client
type ClientError struct {
    Code int
    Body []byte
}

// Error returns an error string containing the client error code
func (c ClientError) Error() string {
    return fmt.Sprintf("unexpected status code: %d", c.Code)
}
```
backend/webdav/tus-upload.go (new file, 88 lines)
```go
package webdav

import (
    "bytes"
    "encoding/base64"
    "fmt"
    "io"
    "strings"
)

// Metadata is a typedef for a string to string map to hold metadata
type Metadata map[string]string

// Upload is a struct containing the file status during upload
type Upload struct {
    stream io.ReadSeeker
    size   int64
    offset int64

    Fingerprint string
    Metadata    Metadata
}

// updateProgress updates the Upload information based on offset.
func (u *Upload) updateProgress(offset int64) {
    u.offset = offset
}

// Finished returns whether this upload is finished or not.
func (u *Upload) Finished() bool {
    return u.offset >= u.size
}

// Progress returns the progress in a percentage.
func (u *Upload) Progress() int64 {
    return (u.offset * 100) / u.size
}

// Offset returns the current upload offset.
func (u *Upload) Offset() int64 {
    return u.offset
}

// Size returns the size of the upload body.
func (u *Upload) Size() int64 {
    return u.size
}

// EncodedMetadata encodes the upload metadata.
func (u *Upload) EncodedMetadata() string {
    var encoded []string

    for k, v := range u.Metadata {
        encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(v)))
    }

    return strings.Join(encoded, ",")
}

func b64encode(s string) string {
    return base64.StdEncoding.EncodeToString([]byte(s))
}

// NewUpload creates a new upload from an io.Reader.
func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
    stream, ok := reader.(io.ReadSeeker)

    if !ok {
        // Reader is not seekable, so buffer it fully in memory.
        buf := new(bytes.Buffer)
        _, err := buf.ReadFrom(reader)
        if err != nil {
            return nil
        }
        stream = bytes.NewReader(buf.Bytes())
    }

    if metadata == nil {
        metadata = make(Metadata)
    }

    return &Upload{
        stream: stream,
        size:   size,

        Fingerprint: fingerprint,
        Metadata:    metadata,
    }
}
```
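Per the tus protocol, the `Upload-Metadata` header carries comma-separated `key base64(value)` pairs, which is what `EncodedMetadata` builds (map iteration makes the pair order unspecified). A tiny illustration using the API above:

```go
// Illustrative fragment, same package as above.
u := NewUpload(strings.NewReader("hi"), 2, Metadata{"filename": "photo.jpg"}, "")
fmt.Println(u.EncodedMetadata()) // filename cGhvdG8uanBn
```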
backend/webdav/tus-uploader.go (new file, 191 lines)
```go
package webdav

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strconv"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

// Uploader holds all information about a currently running upload
type Uploader struct {
    fs                  *Fs
    url                 string
    upload              *Upload
    offset              int64
    aborted             bool
    uploadSubs          []chan Upload
    notifyChan          chan bool
    overridePatchMethod bool
}

// NotifyUploadProgress subscribes to progress updates.
func (u *Uploader) NotifyUploadProgress(c chan Upload) {
    u.uploadSubs = append(u.uploadSubs, c)
}

func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
    if resp == nil {
        return true, err
    }

    switch resp.StatusCode {
    case 204:
        if off, err := strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64); err == nil {
            *newOff = off
            return false, nil
        }
        return false, err
    case 409:
        return false, ErrOffsetMismatch
    case 412:
        return false, ErrVersionMismatch
    case 413:
        return false, ErrLargeUpload
    }

    return f.shouldRetry(ctx, resp, err)
}

func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
    var method string

    if !u.overridePatchMethod {
        method = "PATCH"
    } else {
        method = "POST"
    }

    extraHeaders := map[string]string{} // FIXME: Use extraHeaders(ctx, src) from Object maybe?
    extraHeaders["Upload-Offset"] = strconv.FormatInt(offset, 10)
    extraHeaders["Tus-Resumable"] = "1.0.0"
    extraHeaders["filetype"] = u.upload.Metadata["filetype"]
    if u.overridePatchMethod {
        extraHeaders["X-HTTP-Method-Override"] = "PATCH"
    }

    url, err := url.Parse(u.url)
    if err != nil {
        return 0, fmt.Errorf("upload chunk failed, could not parse url")
    }

    // FIXME: Use GetBody func as in chunking.go
    opts := rest.Opts{
        Method:        method,
        Path:          url.Path,
        NoResponse:    true,
        RootURL:       fmt.Sprintf("%s://%s", url.Scheme, url.Host),
        ContentLength: &size,
        Body:          body,
        ContentType:   "application/offset+octet-stream",
        ExtraHeaders:  extraHeaders,
        Options:       options,
    }

    var newOffset int64

    err = u.fs.pacer.CallNoRetry(func() (bool, error) {
        res, err := u.fs.srv.Call(ctx, &opts)
        return u.fs.shouldRetryChunk(ctx, res, err, &newOffset)
    })
    if err != nil {
        return 0, fmt.Errorf("uploadChunk failed: %w", err)
        // FIXME What do we do here? Remove the entire upload?
        // See https://github.com/tus/tusd/issues/176
    }

    return newOffset, nil
}

// Upload uploads the entire body to the server.
func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
    cnt := 1

    fs.Debug(u.fs, "Upload starts")
    for u.offset < u.upload.size && !u.aborted {
        err := u.UploadChunk(ctx, cnt, options...)
        cnt++
        if err != nil {
            return err
        }
    }
    fs.Debug(u.fs, "-- Upload finished")

    return nil
}

// UploadChunk uploads a single chunk.
func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenOption) error {
    chunkSize := u.fs.opt.ChunkSize
    data := make([]byte, chunkSize)

    _, err := u.upload.stream.Seek(u.offset, 0)

    if err != nil {
        fs.Errorf(u.fs, "Chunk %d: seek in stream failed: %v", cnt, err)
        return err
    }

    size, err := u.upload.stream.Read(data)

    if err != nil {
        fs.Errorf(u.fs, "Chunk %d: cannot read from data stream: %v", cnt, err)
        return err
    }

    body := bytes.NewBuffer(data[:size])

    newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)

    if err == nil {
        fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)
    } else {
        fs.Errorf(u.fs, "Uploaded chunk no %d failed: %v", cnt, err)

        return err
    }

    u.offset = newOffset

    u.upload.updateProgress(u.offset)

    u.notifyChan <- true

    return nil
}

// broadcastProgress waits for a signal and broadcasts it to all subscribers.
func (u *Uploader) broadcastProgress() {
    for range u.notifyChan {
        for _, c := range u.uploadSubs {
            c <- *u.upload
        }
    }
}

// NewUploader creates a new Uploader.
func NewUploader(f *Fs, url string, upload *Upload, offset int64) *Uploader {
    notifyChan := make(chan bool)

    uploader := &Uploader{
        f,
        url,
        upload,
        offset,
        false,
        nil,
        notifyChan,
        false,
    }

    go uploader.broadcastProgress()

    return uploader
}
```
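Nothing in this PR subscribes to `NotifyUploadProgress`, so the following is a hypothetical caller-side fragment (same package, types as defined above) showing how the broadcast loop would be consumed:

```go
// Hypothetical usage, not part of the PR: subscribe before Upload so the
// broadcastProgress goroutine has somewhere to send after each chunk.
progress := make(chan Upload)
uploader.NotifyUploadProgress(progress)
go func() {
    for up := range progress {
        fs.Debugf(nil, "tus progress: %d%% (%d/%d bytes)", up.Progress(), up.Offset(), up.Size())
    }
}()
if err := uploader.Upload(ctx); err != nil {
    return fmt.Errorf("tus upload failed: %w", err)
}
```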
backend/webdav/tus.go (new file, 108 lines)
```go
package webdav

/*
   Chunked upload based on the tus protocol for ownCloud Infinite Scale
   See https://tus.io/protocols/resumable-upload
*/

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "path/filepath"
    "strconv"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType string, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    fn := filepath.Base(src.Remote())
    metadata := map[string]string{
        "filename": fn,
        "mtime":    strconv.FormatInt(src.ModTime(ctx).Unix(), 10),
        "filetype": contentType,
    }

    // Fingerprint is used to identify the upload when resuming. That is not yet implemented.
    fingerprint := ""

    // create an upload from a file
    upload := NewUpload(in, src.Size(), metadata, fingerprint)

    // create the uploader
    uploader, err := o.CreateUploader(ctx, upload, options...)
    if err == nil {
        // start the uploading process
        err = uploader.Upload(ctx, options...)
    }

    return err
}

func (f *Fs) getTusLocationOrRetry(ctx context.Context, resp *http.Response, err error) (bool, string, error) {
    switch resp.StatusCode {
    case 201:
        location := resp.Header.Get("Location")
        return false, location, nil
    case 412:
        return false, "", ErrVersionMismatch
    case 413:
        return false, "", ErrLargeUpload
    }

    retry, err := f.shouldRetry(ctx, resp, err)
    return retry, "", err
}

// CreateUploader creates a new upload to the server.
func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) {
    if u == nil {
        return nil, ErrNilUpload
    }

    // if c.Config.Resume && len(u.Fingerprint) == 0 {
    // 	return nil, ErrFingerprintNotSet
    // }

    l := int64(0)
    p := o.filePath()
    // cut the filename off
    dir, _ := filepath.Split(p)
    if dir == "" {
        dir = "/"
    }

    opts := rest.Opts{
        Method:        "POST",
        Path:          dir,
        NoResponse:    true,
        RootURL:       o.fs.endpointURL,
        ContentLength: &l,
        ExtraHeaders:  o.extraHeaders(ctx, o),
        Options:       options,
    }
    opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10)
    opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata()
    opts.ExtraHeaders["Tus-Resumable"] = "1.0.0"
    // opts.ExtraHeaders["mtime"] = strconv.FormatInt(src.ModTime(ctx).Unix(), 10)

    var tusLocation string
    // rclone http call
    err := o.fs.pacer.CallNoRetry(func() (bool, error) {
        var retry bool
        res, err := o.fs.srv.Call(ctx, &opts)
        retry, tusLocation, err = o.fs.getTusLocationOrRetry(ctx, res, err)
        return retry, err
    })
    if err != nil {
        return nil, fmt.Errorf("making upload directory failed: %w", err)
    }

    uploader := NewUploader(o.fs, tusLocation, u, 0)

    return uploader, nil
}
```
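The two calls above implement the standard tus flow: one POST that declares the length and yields a `Location`, then PATCH requests that append at the current offset. A standalone sketch of that wire exchange, assuming a tus 1.0.0 server at a placeholder URL and no authentication:

```go
package tussketch

import (
    "bytes"
    "fmt"
    "net/http"
    "strconv"
)

// tusCreateAndUpload performs the two-step tus flow: a POST that declares the
// upload length and returns a Location, then a PATCH that appends the data at
// offset zero. serverURL is a placeholder, not a real endpoint.
func tusCreateAndUpload(serverURL string, data []byte) error {
    // Step 1: creation. Upload-Length declares the total size up front.
    req, err := http.NewRequest("POST", serverURL, nil)
    if err != nil {
        return err
    }
    req.Header.Set("Tus-Resumable", "1.0.0")
    req.Header.Set("Upload-Length", strconv.Itoa(len(data)))
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    resp.Body.Close()
    if resp.StatusCode != http.StatusCreated {
        return fmt.Errorf("creation failed: %s", resp.Status)
    }
    location := resp.Header.Get("Location") // where the chunks go

    // Step 2: upload. Each PATCH states the offset it writes at; the server
    // answers 204 with the new Upload-Offset header.
    req, err = http.NewRequest("PATCH", location, bytes.NewReader(data))
    if err != nil {
        return err
    }
    req.Header.Set("Tus-Resumable", "1.0.0")
    req.Header.Set("Upload-Offset", "0")
    req.Header.Set("Content-Type", "application/offset+octet-stream")
    resp, err = http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    resp.Body.Close()
    if resp.StatusCode != http.StatusNoContent {
        return fmt.Errorf("chunk rejected: %s", resp.Status)
    }
    return nil
}
```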
```diff
@@ -84,7 +84,10 @@ func init() {
             Help:  "Nextcloud",
         }, {
             Value: "owncloud",
-            Help:  "Owncloud",
+            Help:  "Owncloud 10 PHP based WebDAV server",
+        }, {
+            Value: "infinitescale",
+            Help:  "ownCloud Infinite Scale",
         }, {
             Value: "sharepoint",
             Help:  "Sharepoint Online, authenticated by Microsoft account",
```
```diff
@@ -212,6 +215,7 @@ type Fs struct {
     pacer              *fs.Pacer     // pacer for API calls
     precision          time.Duration // mod time precision
     canStream          bool          // set if can stream
+    canTus             bool          // supports the TUS upload protocol
     useOCMtime         bool          // set if can use X-OC-Mtime
     propsetMtime       bool          // set if can use propset
     retryWithZeroDepth bool          // some vendors (sharepoint) won't list files when Depth is 1 (our default)
@@ -632,6 +636,15 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
         f.propsetMtime = true
         f.hasOCMD5 = true
         f.hasOCSHA1 = true
+    case "infinitescale":
+        f.precision = time.Second
+        f.useOCMtime = true
+        f.propsetMtime = true
+        f.hasOCMD5 = false
+        f.hasOCSHA1 = true
+        f.canChunk = false
+        f.canTus = true
+        f.opt.ChunkSize = 10 * fs.Mebi
     case "nextcloud":
         f.precision = time.Second
         f.useOCMtime = true
```
```diff
@@ -1329,7 +1342,7 @@ func (o *Object) Size() int64 {
     ctx := context.TODO()
     err := o.readMetaData(ctx)
     if err != nil {
-        fs.Logf(o, "Failed to read metadata: %v", err)
+        fs.Infof(o, "Failed to read metadata: %v", err)
         return 0
     }
     return o.size
@@ -1373,7 +1386,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 func (o *Object) ModTime(ctx context.Context) time.Time {
     err := o.readMetaData(ctx)
     if err != nil {
-        fs.Logf(o, "Failed to read metadata: %v", err)
+        fs.Infof(o, "Failed to read metadata: %v", err)
         return time.Now()
     }
     return o.modTime
@@ -1499,9 +1512,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         return fmt.Errorf("Update mkParentDir failed: %w", err)
     }

-    if o.shouldUseChunkedUpload(src) {
-        fs.Debugf(src, "Update will use the chunked upload strategy")
-        err = o.updateChunked(ctx, in, src, options...)
+    if o.fs.canTus { // supports the tus upload protocol, ie. InfiniteScale
+        fs.Debugf(src, "Update will use the tus protocol to upload")
+        contentType := fs.MimeType(ctx, src)
+        err = o.updateViaTus(ctx, in, contentType, src, options...)
+        if err != nil {
+            fs.Debug(src, "tus update failed.")
+            return fmt.Errorf("tus update failed: %w", err)
+        }
+    } else if o.shouldUseChunkedUpload(src) {
+        if o.fs.opt.Vendor == "nextcloud" {
+            fs.Debugf(src, "Update will use the chunked upload strategy")
+            err = o.updateChunked(ctx, in, src, options...)
+        } else {
+            fs.Debug(src, "Chunking - unknown vendor")
+        }
         if err != nil {
             return err
         }
```
```diff
@@ -1513,10 +1538,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         // TODO: define getBody() to enable low-level HTTP/2 retries
         err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
         if err != nil {
-            return err
+            return fmt.Errorf("unchunked simple update failed: %w", err)
         }
     }

     // read metadata from remote
     o.hasMetaData = false
     return o.readMetaData(ctx)
@@ -1526,7 +1550,7 @@ func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string
     extraHeaders := map[string]string{}
     if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
         if o.fs.useOCMtime {
-            extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
+            extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", o.modTime.Unix())
         }
         // Set one upload checksum
         // Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
```
```diff
@@ -15,6 +15,7 @@ import (
     _ "github.com/rclone/rclone/cmd/cleanup"
     _ "github.com/rclone/rclone/cmd/cmount"
     _ "github.com/rclone/rclone/cmd/config"
+    _ "github.com/rclone/rclone/cmd/convmv"
    _ "github.com/rclone/rclone/cmd/copy"
     _ "github.com/rclone/rclone/cmd/copyto"
     _ "github.com/rclone/rclone/cmd/copyurl"
```
```diff
@@ -5,8 +5,6 @@ import (
     "os"
     "sort"
-    "strconv"
     "strings"
-    "time"
 )

 // Names comprises a set of file names
@@ -85,81 +83,3 @@ func (am AliasMap) Alias(name1 string) string {
     }
     return name1
 }
-
-// ParseGlobs determines whether a string contains {brackets}
-// and returns the substring (including both brackets) for replacing
-// substring is first opening bracket to last closing bracket --
-// good for {{this}} but not {this}{this}
-func ParseGlobs(s string) (hasGlobs bool, substring string) {
-    open := strings.Index(s, "{")
-    close := strings.LastIndex(s, "}")
-    if open >= 0 && close > open {
-        return true, s[open : close+1]
-    }
-    return false, ""
-}
-
-// TrimBrackets converts {{this}} to this
-func TrimBrackets(s string) string {
-    return strings.Trim(s, "{}")
-}
-
-// TimeFormat converts a user-supplied string to a Go time constant, if possible
-func TimeFormat(timeFormat string) string {
-    switch timeFormat {
-    case "Layout":
-        timeFormat = time.Layout
-    case "ANSIC":
-        timeFormat = time.ANSIC
-    case "UnixDate":
-        timeFormat = time.UnixDate
-    case "RubyDate":
-        timeFormat = time.RubyDate
-    case "RFC822":
-        timeFormat = time.RFC822
-    case "RFC822Z":
-        timeFormat = time.RFC822Z
-    case "RFC850":
-        timeFormat = time.RFC850
-    case "RFC1123":
-        timeFormat = time.RFC1123
-    case "RFC1123Z":
-        timeFormat = time.RFC1123Z
-    case "RFC3339":
-        timeFormat = time.RFC3339
-    case "RFC3339Nano":
-        timeFormat = time.RFC3339Nano
-    case "Kitchen":
-        timeFormat = time.Kitchen
-    case "Stamp":
-        timeFormat = time.Stamp
-    case "StampMilli":
-        timeFormat = time.StampMilli
-    case "StampMicro":
-        timeFormat = time.StampMicro
-    case "StampNano":
-        timeFormat = time.StampNano
-    case "DateTime":
-        // timeFormat = time.DateTime // missing in go1.19
-        timeFormat = "2006-01-02 15:04:05"
-    case "DateOnly":
-        // timeFormat = time.DateOnly // missing in go1.19
-        timeFormat = "2006-01-02"
-    case "TimeOnly":
-        // timeFormat = time.TimeOnly // missing in go1.19
-        timeFormat = "15:04:05"
-    case "MacFriendlyTime", "macfriendlytime", "mac":
-        timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
-    }
-    return timeFormat
-}
-
-// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
-func AppyTimeGlobs(s string, t time.Time) string {
-    hasGlobs, substring := ParseGlobs(s)
-    if !hasGlobs {
-        return s
-    }
-    timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
-    return strings.ReplaceAll(s, substring, timeString)
-}
```
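These helpers move to `lib/transform` (the bisync diff below switches its callers accordingly). For reference, a self-contained sketch of the glob-replacement behaviour they implement, using the same first-`{`-to-last-`}` rule and one of the named layouts:

```go
package main

import (
    "fmt"
    "strings"
    "time"
)

// applyTimeGlobs replicates the documented behaviour: find the span from the
// first "{" to the last "}", map the name inside to a Go layout, and format.
func applyTimeGlobs(s string, t time.Time) string {
    open := strings.Index(s, "{")
    close := strings.LastIndex(s, "}")
    if open < 0 || close <= open {
        return s // no glob present
    }
    sub := s[open : close+1]
    layout := strings.Trim(sub, "{}")
    if layout == "DateOnly" { // one of the named layouts; the others map similarly
        layout = "2006-01-02"
    }
    return strings.ReplaceAll(s, sub, t.Local().Format(layout))
}

func main() {
    t := time.Date(2025, 5, 1, 12, 0, 0, 0, time.UTC)
    fmt.Println(applyTimeGlobs("myfile-{DateOnly}.txt", t)) // myfile-2025-05-01.txt
}
```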
```diff
@@ -4,8 +4,6 @@ import (
     "context"
     "fmt"
     "math"
-    "mime"
-    "path"
     "strings"
     "time"

@@ -13,6 +11,7 @@ import (
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/lib/terminal"
+    "github.com/rclone/rclone/lib/transform"
 )

 // Prefer describes strategies for resolving sync conflicts
@@ -97,8 +96,8 @@ func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
     }
     // replace glob variables, if any
     t := time.Now() // capture static time here so it is the same for all files throughout this run
-    b.opt.ConflictSuffix1 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
-    b.opt.ConflictSuffix2 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
+    b.opt.ConflictSuffix1 = transform.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
+    b.opt.ConflictSuffix2 = transform.AppyTimeGlobs(b.opt.ConflictSuffix2, t)

     // append dot (intentionally allow more than one)
     b.opt.ConflictSuffix1 = "." + b.opt.ConflictSuffix1
@@ -130,6 +129,7 @@ type (
         path2 namePair
     }
 )

 type namePair struct {
     oldName string
     newName string
@@ -240,24 +240,7 @@ func SuffixName(ctx context.Context, remote, suffix string) string {
     }
     ci := fs.GetConfig(ctx)
     if ci.SuffixKeepExtension {
-        var (
-            base  = remote
-            exts  = ""
-            first = true
-            ext   = path.Ext(remote)
-        )
-        for ext != "" {
-            // Look second and subsequent extensions in mime types.
-            // If they aren't found then don't keep it as an extension.
-            if !first && mime.TypeByExtension(ext) == "" {
-                break
-            }
-            base = base[:len(base)-len(ext)]
-            exts = ext + exts
-            first = false
-            ext = path.Ext(base)
-        }
-        return base + suffix + exts
+        return transform.SuffixKeepExtension(remote, suffix)
     }
     return remote + suffix
 }
```
cmd/convmv/convmv.go (new file, 108 lines)
```go
// Package convmv provides the convmv command.
package convmv

import (
    "context"
    "errors"
    "strings"

    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/sync"
    "github.com/rclone/rclone/lib/transform"
    "github.com/spf13/cobra"
)

// Globals
var (
    deleteEmptySrcDirs = false
    createEmptySrcDirs = false
)

func init() {
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
    flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move", "")
    flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move", "")
}

var commandDefinition = &cobra.Command{
    Use:   "convmv dest:path --name-transform XXX",
    Short: `Convert file and directory names in place.`,
    // Warning¡ "¡" will be replaced by backticks below
    Long: strings.ReplaceAll(`
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.

`+transform.SprintList()+`

Multiple transformations can be used in sequence, applied in the order they are specified on the command line.

The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.

## Files vs Directories ##

By default ¡--name-transform¡ will only apply to file names. This means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just directories.
To choose which part of the file path is affected, some tags can be added to the ¡--name-transform¡.

| Tag | Effect |
|------|------|
| ¡file¡ | Only transform the leaf name of files (DEFAULT) |
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
| ¡all¡ | Transform the entire path for files and directories |

This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.

For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡.

Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.

## Ordering and Conflicts ##

* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed.
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed.
* Each transformation will be run one path segment at a time.
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify transformations using ¡--dry-run¡ before execution.

## Race Conditions and Non-Deterministic Behavior ##

Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.

* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
* To minimize risks, users should:
    * Carefully review transformations that may introduce conflicts.
    * Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
    * Avoid transformations that cause multiple distinct source files to map to the same destination name.
    * Consider disabling concurrency with ¡--transfers=1¡ if necessary.
    * Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.

`, "¡", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.70",
        "groups":            "Filter,Listing,Important,Copy",
    },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fdst, srcFileName := cmd.NewFsFile(args[0])
        cmd.Run(false, true, command, func() error {
            if !transform.Transforming(context.Background()) {
                return errors.New("--name-transform must be set")
            }
            if srcFileName == "" {
                return sync.Transform(context.Background(), fdst, deleteEmptySrcDirs, createEmptySrcDirs)
            }
            return operations.TransformFile(context.Background(), fdst, srcFileName)
        })
    },
}
```
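The same machinery can be driven programmatically. A fragment mirroring how this command and the tests below use `lib/transform`; illustrative only, not a documented API:

```go
// Illustrative fragment only: these calls appear in convmv and its tests.
ctx := context.Background()
if err := transform.SetOptions(ctx, "all,prefix=ABC", "all,nfc"); err != nil {
    return err
}
if transform.Transforming(ctx) {
    fmt.Println(transform.Path(ctx, "dir/file.txt", false)) // false: treat as a file, not a directory
}
```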
cmd/convmv/convmv_test.go (new file, 253 lines)
```go
// Package convmv provides the convmv command.
package convmv

import (
    "cmp"
    "context"
    "fmt"
    "path"
    "slices"
    "strings"
    "testing"

    _ "github.com/rclone/rclone/backend/all" // import all backends
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/filter"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/sync"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/lib/transform"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/text/unicode/norm"
)

// Some times used in the tests
var (
    t1    = fstest.Time("2001-02-03T04:05:06.499999999Z")
    debug = ``
)

// TestMain drives the tests
func TestMain(m *testing.M) {
    fstest.TestMain(m)
}

func TestTransform(t *testing.T) {
    type args struct {
        TransformOpt     []string
        TransformBackOpt []string
        Lossless         bool // whether the TransformBackAlgo is always losslessly invertible
    }
    tests := []struct {
        name string
        args args
    }{
        {name: "NFC", args: args{
            TransformOpt:     []string{"all,nfc"},
            TransformBackOpt: []string{"all,nfd"},
            Lossless:         false,
        }},
        {name: "NFD", args: args{
            TransformOpt:     []string{"all,nfd"},
            TransformBackOpt: []string{"all,nfc"},
            Lossless:         false,
        }},
        {name: "base64", args: args{
            TransformOpt:     []string{"all,base64encode"},
            TransformBackOpt: []string{"all,base64encode"},
            Lossless:         false,
        }},
        {name: "prefix", args: args{
            TransformOpt:     []string{"all,prefix=PREFIX"},
            TransformBackOpt: []string{"all,trimprefix=PREFIX"},
            Lossless:         true,
        }},
        {name: "suffix", args: args{
            TransformOpt:     []string{"all,suffix=SUFFIX"},
            TransformBackOpt: []string{"all,trimsuffix=SUFFIX"},
            Lossless:         true,
        }},
        {name: "truncate", args: args{
            TransformOpt:     []string{"all,truncate=10"},
            TransformBackOpt: []string{"all,truncate=10"},
            Lossless:         false,
        }},
        {name: "encoder", args: args{
            TransformOpt:     []string{"all,encoder=Colon,SquareBracket"},
            TransformBackOpt: []string{"all,decoder=Colon,SquareBracket"},
            Lossless:         true,
        }},
        {name: "ISO-8859-1", args: args{
            TransformOpt:     []string{"all,ISO-8859-1"},
            TransformBackOpt: []string{"all,ISO-8859-1"},
            Lossless:         false,
        }},
        {name: "charmap", args: args{
            TransformOpt:     []string{"all,charmap=ISO-8859-7"},
            TransformBackOpt: []string{"all,charmap=ISO-8859-7"},
            Lossless:         false,
        }},
        {name: "lowercase", args: args{
            TransformOpt:     []string{"all,lowercase"},
            TransformBackOpt: []string{"all,lowercase"},
            Lossless:         false,
        }},
        {name: "ascii", args: args{
            TransformOpt:     []string{"all,ascii"},
            TransformBackOpt: []string{"all,ascii"},
            Lossless:         false,
        }},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            r := fstest.NewRun(t)
            defer r.Finalise()

            ctx := context.Background()
            r.Mkdir(ctx, r.Flocal)
            r.Mkdir(ctx, r.Fremote)
            items := makeTestFiles(t, r, "dir1")
            err := r.Fremote.Mkdir(ctx, "empty/empty")
            require.NoError(t, err)
            err = r.Flocal.Mkdir(ctx, "empty/empty")
            require.NoError(t, err)
            deleteDSStore(t, r)
            r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
            r.CheckLocalListing(t, items, []string{"dir1", "empty", "empty/empty"})

            err = transform.SetOptions(ctx, tt.args.TransformOpt...)
            require.NoError(t, err)

            err = sync.Transform(ctx, r.Fremote, true, true)
            assert.NoError(t, err)
            compareNames(ctx, t, r, items)

            transformedItems := transformItems(ctx, t, items)
            r.CheckRemoteListing(t, transformedItems, []string{transform.Path(ctx, "dir1", true), transform.Path(ctx, "empty", true), transform.Path(ctx, "empty/empty", true)})

            err = transform.SetOptions(ctx, tt.args.TransformBackOpt...)
            require.NoError(t, err)
            err = sync.Transform(ctx, r.Fremote, true, true)
            assert.NoError(t, err)
            compareNames(ctx, t, r, transformedItems)

            if tt.args.Lossless {
                deleteDSStore(t, r)
                r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
            }
        })
    }
}

// const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሀሠበዠጠᎠᏀᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠀⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
const alphabet = "abcdefg123456789"

var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"}

func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
    t.Helper()
    n := 0
    // Create test files
    items := []fstest.Item{}
    for _, c := range alphabet {
        var out strings.Builder
        for i := rune(0); i < 7; i++ {
            out.WriteRune(c + i)
        }
        fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
        fileName = strings.ToValidUTF8(fileName, "")
        fileName = strings.NewReplacer(":", "", "<", "", ">", "", "?", "").Replace(fileName) // remove characters illegal on windows

        if debug != "" {
            fileName = debug
        }

        item := r.WriteObject(context.Background(), fileName, fileName, t1)
        r.WriteFile(fileName, fileName, t1)
        items = append(items, item)
        n++

        if debug != "" {
            break
        }
    }

    for _, extra := range extras {
        item := r.WriteObject(context.Background(), extra, extra, t1)
        r.WriteFile(extra, extra, t1)
        items = append(items, item)
    }

    return items
}

func deleteDSStore(t *testing.T, r *fstest.Run) {
    ctxDSStore, fi := filter.AddConfig(context.Background())
    err := fi.AddRule(`+ *.DS_Store`)
    assert.NoError(t, err)
    err = fi.AddRule(`- **`)
    assert.NoError(t, err)
    err = operations.Delete(ctxDSStore, r.Fremote)
    assert.NoError(t, err)
}

func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fstest.Item) {
    var entries fs.DirEntries

    deleteDSStore(t, r)
    err := walk.ListR(context.Background(), r.Fremote, "", true, -1, walk.ListObjects, func(e fs.DirEntries) error {
        entries = append(entries, e...)
        return nil
    })
    assert.NoError(t, err)
    entries = slices.DeleteFunc(entries, func(E fs.DirEntry) bool { // remove those pesky .DS_Store files
        if strings.Contains(E.Remote(), ".DS_Store") {
            err := operations.DeleteFile(context.Background(), E.(fs.Object))
            assert.NoError(t, err)
            return true
        }
        return false
    })
    require.Equal(t, len(items), entries.Len())

    // sort by CONVERTED name
    slices.SortStableFunc(items, func(a, b fstest.Item) int {
        aConv := transform.Path(ctx, a.Path, false)
        bConv := transform.Path(ctx, b.Path, false)
        return cmp.Compare(aConv, bConv)
    })
    slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
        return cmp.Compare(a.Remote(), b.Remote())
    })

    for i, e := range entries {
        expect := transform.Path(ctx, items[i].Path, false)
        msg := fmt.Sprintf("expected %v, got %v", detectEncoding(expect), detectEncoding(e.Remote()))
        assert.Equal(t, expect, e.Remote(), msg)
    }
}

func transformItems(ctx context.Context, t *testing.T, items []fstest.Item) []fstest.Item {
    transformedItems := []fstest.Item{}
    for _, item := range items {
        newPath := transform.Path(ctx, item.Path, false)
        newItem := item
        newItem.Path = newPath
        transformedItems = append(transformedItems, newItem)
    }
    return transformedItems
}

func detectEncoding(s string) string {
    if norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
        return "BOTH"
    }
    if !norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
        return "NFD"
    }
    if norm.NFC.IsNormalString(s) && !norm.NFD.IsNormalString(s) {
        return "NFC"
    }
    return "OTHER"
}
```
```diff
@@ -191,7 +191,6 @@ func setupRootCommand(rootCmd *cobra.Command) {
     })

     cobra.OnInitialize(initConfig)
-
 }

 // Traverse the tree of commands running fn on each
```
```diff
@@ -14,7 +14,7 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
 access.

 Please note that some clients may require HTTPS endpoints. See [the
-SSL docs](#ssl-tls) for more information.
+SSL docs](#tls-ssl) for more information.

 This command uses the [VFS directory cache](#vfs-virtual-file-system).
 All the functionality will work with `--vfs-cache-mode off`. Using
```
```diff
@@ -81,7 +81,6 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
         gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
     )

-    w.handler = http.NewServeMux()
     w.handler = w.faker.Server()

     if proxy.Opt.AuthProxy != "" {
```
```diff
@@ -961,3 +961,9 @@ put them back in again.` >}}
 * Markus Gerstel <markus.gerstel@osirium.com>
 * simwai <16225108+simwai@users.noreply.github.com>
 * Ben Alex <ben.alex@acegi.com.au>
+* Klaas Freitag <opensource@freisturz.de> <klaas.freitag@kiteworks.com>
+* Andrew Kreimer <algonell@gmail.com>
+* Ed Craig-Wood <138211970+edc-w@users.noreply.github.com>
+* Christian Richter <crichter@owncloud.com> <1058116+dragonchaser@users.noreply.github.com>
+* Ralf Haferkamp <r.haferkamp@opencloud.eu>
+* Jugal Kishore <me@devjugal.com>
```
```diff
@@ -5,6 +5,50 @@ description: "Rclone Changelog"

 # Changelog

+## v1.69.2 - 2025-05-01
+
+[See commits](https://github.com/rclone/rclone/compare/v1.69.1...v1.69.2)
+
+* Bug fixes
+    * accounting: Fix percentDiff calculation (Anagh Kumar Baranwal)
+    * build
+        * Update github.com/golang-jwt/jwt/v4 from 4.5.1 to 4.5.2 to fix CVE-2025-30204 (dependabot[bot])
+        * Update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
+        * Update golang.org/x/crypto to v0.35.0 to fix CVE-2025-22869 (Nick Craig-Wood)
+        * Update golang.org/x/net from 0.36.0 to 0.38.0 to fix CVE-2025-22870 (dependabot[bot])
+        * Update golang.org/x/net to 0.36.0 to fix CVE-2025-22869 (dependabot[bot])
+        * Stop building with go < go1.23 as security updates forbade it (Nick Craig-Wood)
+        * Fix docker plugin build (Anagh Kumar Baranwal)
+    * cmd: Fix crash if rclone is invoked without any arguments (Janne Hellsten)
+    * config: Read configuration passwords from stdin even when terminated with EOF (Samantha Bowen)
+    * doc fixes (Andrew Kreimer, Danny Garside, eccoisle, Ed Craig-Wood, emyarod, jack, Jugal Kishore, Markus Gerstel, Michael Kebe, Nick Craig-Wood, simonmcnair, simwai, Zachary Vorhies)
+    * fs: Fix corruption of SizeSuffix with "B" suffix in config (eg --min-size) (Nick Craig-Wood)
+    * lib/http: Fix race between Serve() and Shutdown() (Nick Craig-Wood)
+    * object: Fix memory object out of bounds Seek (Nick Craig-Wood)
+    * operations: Fix call fmt.Errorf with wrong err (alingse)
+    * rc
+        * Disable the metrics server when running `rclone rc` (hiddenmarten)
+        * Fix debug/* commands not being available over unix sockets (Nick Craig-Wood)
+    * serve nfs: Fix unlikely crash (Nick Craig-Wood)
+    * stats: Fix the speed not getting updated after a pause in the processing (Anagh Kumar Baranwal)
+    * sync
+        * Fix cpu spinning when empty directory finding with leading slashes (Nick Craig-Wood)
+        * Copy dir modtimes even when copyEmptySrcDirs is false (ll3006)
+* VFS
+    * Fix directory cache serving stale data (Lorenz Brun)
+    * Fix inefficient directory caching when directory reads are slow (huanghaojun)
+    * Fix integration test failures (Nick Craig-Wood)
+* Drive
+    * Metadata: fix error when setting copy-requires-writer-permission on a folder (Nick Craig-Wood)
+* Dropbox
+    * Retry link without expiry (Dave Vasilevsky)
+* HTTP
+    * Correct root if definitely pointing to a file (nielash)
+* Iclouddrive
+    * Fix so created files are writable (Ben Alex)
+* Onedrive
+    * Fix metadata ordering in permissions (Nick Craig-Wood)
+
 ## v1.69.1 - 2025-02-14

 [See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)
```
```diff
@@ -60,7 +104,7 @@ description: "Rclone Changelog"
     * fs: Make `--links` flag global and add new `--local-links` and `--vfs-links` flags (Nick Craig-Wood)
     * http servers: Disable automatic authentication skipping for unix sockets in http servers (Moises Lima)
         * This was making it impossible to use unix sockets with an proxy
-        * This might now cause rclone to need authenticaton where it didn't before
+        * This might now cause rclone to need authentication where it didn't before
     * oauthutil: add support for OAuth client credential flow (Martin Hassack, Nick Craig-Wood)
     * operations: make log messages consistent for mkdir/rmdir at INFO level (Nick Craig-Wood)
     * rc: Add `relative` to [vfs/queue-set-expiry](/rc/#vfs-queue-set-expiry) (Nick Craig-Wood)
```
```diff
@@ -738,7 +782,7 @@ instead of of `--size-only`, when `check` is not available.
     * Update all dependencies (Nick Craig-Wood)
     * Refactor version info and icon resource handling on windows (albertony)
     * doc updates (albertony, alfish2000, asdffdsazqqq, Dimitri Papadopoulos, Herby Gillot, Joda Stößer, Manoj Ghosh, Nick Craig-Wood)
-    * Implement `--metadata-mapper` to transform metatadata with a user supplied program (Nick Craig-Wood)
+    * Implement `--metadata-mapper` to transform metadata with a user supplied program (Nick Craig-Wood)
     * Add `ChunkWriterDoesntSeek` feature flag and set it for b2 (Nick Craig-Wood)
     * lib/http: Export basic go string functions for use in `--template` (Gabriel Espinoza)
     * makefile: Use POSIX compatible install arguments (Mina Galić)
```
```diff
@@ -853,7 +897,7 @@ instead of of `--size-only`, when `check` is not available.
     * Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
 * B2
     * Fix multipart upload: corrupted on transfer: sizes differ XXX vs 0 (Nick Craig-Wood)
-    * Fix locking window when getting mutipart upload URL (Nick Craig-Wood)
+    * Fix locking window when getting multipart upload URL (Nick Craig-Wood)
     * Fix server side copies greater than 4GB (Nick Craig-Wood)
     * Fix chunked streaming uploads (Nick Craig-Wood)
     * Reduce default `--b2-upload-concurrency` to 4 to reduce memory usage (Nick Craig-Wood)
```
```diff
@@ -1557,6 +1557,32 @@ Setting `--max-buffer-memory` allows the buffer memory to be
 controlled so that it doesn't overwhelm the machine and allows
 `--transfers` to be set large.

+### --max-connections=N ###
+
+This sets the maximum number of concurrent calls to the backend API.
+It may not map 1:1 to TCP or HTTP connections depending on the backend
+in use and the use of HTTP1 vs HTTP2.
+
+When downloading files, backends only limit the initial opening of the
+stream. The bulk data download is not counted as a connection. This
+means that the `--max-connections` flag won't limit the total number
+of downloads.
+
+Note that it is possible to cause deadlocks with this setting so it
+should be used with care.
+
+If you are doing a sync or copy then make sure `--max-connections` is
+one more than the sum of `--transfers` and `--checkers`.
+
+If you use `--check-first` then `--max-connections` just needs to be
+one more than the maximum of `--checkers` and `--transfers`.
+
+So for `--max-connections 3` you'd use `--checkers 2 --transfers 2
+--check-first` or `--checkers 1 --transfers 1`.
+
+Setting this flag can be useful for backends which do multipart
+uploads to limit the number of simultaneous parts being transferred.
+
 ### --max-delete=N ###

 This tells rclone not to delete more than N files. If that limit is
@@ -1848,6 +1874,13 @@ If the backend has a `--backend-upload-concurrency` setting (eg
 number of transfers instead if it is larger than the value of
 `--multi-thread-streams` or `--multi-thread-streams` isn't set.

+### --name-transform COMMAND[=XXXX] ###
+
+`--name-transform` introduces path name transformations for
+`rclone copy`, `rclone sync`, and `rclone move`. These transformations
+enable modifications to source and destination file names by applying
+prefixes, suffixes, and other alterations during transfer operations.
+For detailed docs and examples, see [`convmv`](/commands/rclone_convmv/).
+
 ### --no-check-dest ###

 The `--no-check-dest` can be used with `move` or `copy` and it causes
```
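The sizing rule in the `--max-connections` text above (one more connection than the workers that can hold one) exists because a counting semaphore deadlocks once every worker holds a token while waiting for another. A minimal sketch of such a limiter; an illustration of the failure mode, not rclone's actual implementation:

```go
package main

import "fmt"

// connLimiter is a counting semaphore: Acquire blocks once max tokens
// are out, which is how a --max-connections style cap can deadlock if
// every worker holds a token while waiting for one more.
type connLimiter chan struct{}

func newConnLimiter(max int) connLimiter { return make(connLimiter, max) }

func (l connLimiter) Acquire() { l <- struct{}{} }
func (l connLimiter) Release() { <-l }

func main() {
    lim := newConnLimiter(3) // e.g. --max-connections 3
    lim.Acquire()            // a transfer holds a slot
    lim.Acquire()            // a checker holds a slot
    lim.Acquire()            // the spare slot lets one of them finish
    fmt.Println("three slots in use; a fourth Acquire would block")
    lim.Release()
}
```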
```diff
@@ -104,11 +104,11 @@ To copy a local directory to an WebDAV directory called backup
 ### Modification times and hashes

 Plain WebDAV does not support modified times. However when used with
-Fastmail Files, Owncloud or Nextcloud rclone will support modified times.
+Fastmail Files, ownCloud or Nextcloud rclone will support modified times.

 Likewise plain WebDAV does not support hashes, however when used with
-Fastmail Files, Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes.
-Depending on the exact version of Owncloud or Nextcloud hashes may
+Fastmail Files, ownCloud or Nextcloud rclone will support SHA1 and MD5 hashes.
+Depending on the exact version of ownCloud or Nextcloud hashes may
 appear on all objects, or only on objects which had a hash uploaded
 with them.

@@ -355,19 +355,28 @@ this as the password.

 Fastmail supports modified times using the `X-OC-Mtime` header.

-### Owncloud
+### ownCloud

 Click on the settings cog in the bottom right of the page and this
 will show the WebDAV URL that rclone needs in the config step. It
 will look something like `https://example.com/remote.php/webdav/`.

-Owncloud supports modified times using the `X-OC-Mtime` header.
+ownCloud supports modified times using the `X-OC-Mtime` header.

 ### Nextcloud

-This is configured in an identical way to Owncloud. Note that
+This is configured in an identical way to ownCloud. Note that
 Nextcloud initially did not support streaming of files (`rcat`) whereas
-Owncloud did, but [this](https://github.com/nextcloud/nextcloud-snap/issues/365) seems to be fixed as of 2020-11-27 (tested with rclone v1.53.1 and Nextcloud Server v19).
+ownCloud did, but [this](https://github.com/nextcloud/nextcloud-snap/issues/365) seems to be fixed as of 2020-11-27 (tested with rclone v1.53.1 and Nextcloud Server v19).
+
+### ownCloud Infinite Scale
+
+The WebDAV URL for Infinite Scale can be found in the details panel of
+any space in Infinite Scale, if the display was enabled in the personal
+settings of the user through a checkbox there.
+
+Infinite Scale works with the chunking [tus](https://tus.io) upload protocol.
+The chunk size is currently fixed at 10 MB.

 ### Sharepoint Online
```
fs/config.go (32 changed lines)
@@ -545,31 +545,16 @@ var ConfigOptionsInfo = Options{{
    Help:   "Add partial-suffix to temporary file name when --inplace is not used",
    Groups: "Copy",
}, {
- Name: "max_connections",
- Help: strings.ReplaceAll(`Maximum number of simultaneous connections, 0 for unlimited.
-
- This sets the maximum number of connections made to the backend on a
- per backend basis. Connections in this case are calls to the backend
- API and may not map 1:1 to TCP or HTTP connections depending on the
- backend in use.
-
- Note that it is possible to cause deadlocks with this setting so it
- should be used with care.
-
- If you are doing a sync or copy then make sure |--max-connections| is
- one more than the sum of |--transfers| and |--checkers|.
-
- If you use |--check-first| then |--max-connections| just needs to be
- one more than the maximum of |--checkers| and |--transfers|.
-
- So for |--max-connections 3| you'd use |--checkers 2 --transfers 2
- --check-first| or |--checkers 1 --transfers 1|.
-
- Setting this flag can be useful for backends which do multipart
- uploads or downloads to limit the number of total connections.
- `, "|", "`"),
+ Name: "max_connections",
+ Help: "Maximum number of simultaneous backend API connections, 0 for unlimited.",
    Default:  0,
    Advanced: true,
    Groups:   "Networking",
}, {
+ Name:    "name_transform",
+ Default: []string{},
+ Help:    "Transform paths during the copy process.",
+ Groups:  "Copy",
}}

// ConfigInfo is filesystem config options
@@ -681,6 +666,7 @@ type ConfigInfo struct {
    PartialSuffix  string       `config:"partial_suffix"`
    MetadataMapper SpaceSepList `config:"metadata_mapper"`
    MaxConnections int          `config:"max_connections"`
+   NameTransform  []string     `config:"name_transform"`
}

func init() {
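A hedged sketch of how the two options above surface at runtime — `fs.GetConfig` is the same accessor the march and sync code later in this compare uses, and the field names are the ones added in this hunk:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	// Read the global config attached to the context.
	ci := fs.GetConfig(ctx)
	fmt.Printf("max connections: %d, name transforms: %v\n",
		ci.MaxConnections, ci.NameTransform)
}
```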
@@ -15,6 +15,7 @@ import (
    "github.com/rclone/rclone/fs/filter"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/walk"
+   "github.com/rclone/rclone/lib/transform"
    "golang.org/x/sync/errgroup"
    "golang.org/x/text/unicode/norm"
)
@@ -60,9 +61,9 @@ type Marcher interface {
// Note: this will flag filter-aware backends on the source side
func (m *March) init(ctx context.Context) {
    ci := fs.GetConfig(ctx)
-   m.srcListDir = m.makeListDir(ctx, m.Fsrc, m.SrcIncludeAll)
+   m.srcListDir = m.makeListDir(ctx, m.Fsrc, m.SrcIncludeAll, m.srcKey)
    if !m.NoTraverse {
-       m.dstListDir = m.makeListDir(ctx, m.Fdst, m.DstIncludeAll)
+       m.dstListDir = m.makeListDir(ctx, m.Fdst, m.DstIncludeAll, m.dstKey)
    }
    // Now create the matching transform
    // ..normalise the UTF8 first
@@ -80,13 +81,26 @@ func (m *March) init(ctx context.Context) {
    }
}

- // key turns a directory entry into a sort key using the defined transforms.
- func (m *March) key(entry fs.DirEntry) string {
+ // srcKey turns a directory entry into a sort key using the defined transforms.
+ func (m *March) srcKey(entry fs.DirEntry) string {
    if entry == nil {
        return ""
    }
    name := path.Base(entry.Remote())
-   for _, transform := range m.transforms {
-       name = transform.Path(m.Ctx, name, fs.DirEntryType(entry) == "directory")
+   return transforms(name, m.transforms)
}

+ // dstKey turns a directory entry into a sort key using the defined transforms.
+ func (m *March) dstKey(entry fs.DirEntry) string {
+     if entry == nil {
+         return ""
+     }
+     return transforms(path.Base(entry.Remote()), m.transforms)
+ }
+
+ func transforms(name string, transforms []matchTransformFn) string {
+     for _, transform := range transforms {
+         name = transform(name)
+     }
+     return name
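The point of the `srcKey`/`dstKey` split is that both listings must sort by a *comparable* key even when names are being rewritten. A self-contained sketch of the idea (local `matchTransformFn` and `sortKey` stand in for the March fields above; a case-insensitive march would use `strings.ToLower` as its only transform):

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
	"strings"
)

// matchTransformFn mirrors the signature used by March.transforms.
type matchTransformFn func(string) string

// sortKey folds the transforms over a name, like the transforms helper above.
func sortKey(name string, fns []matchTransformFn) string {
	for _, fn := range fns {
		name = fn(name)
	}
	return name
}

func main() {
	fns := []matchTransformFn{strings.ToLower}
	names := []string{"b.txt", "A.txt", "a.txt"}
	slices.SortStableFunc(names, func(a, b string) int {
		return cmp.Compare(sortKey(a, fns), sortKey(b, fns))
	})
	fmt.Println(names) // [A.txt a.txt b.txt]
}
```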
@@ -95,14 +109,14 @@ func (m *March) key(entry fs.DirEntry) string {
// makeListDir constructs a listing function for the given fs
// and includeAll flags for marching through the file system.
// Note: this will optionally flag filter-aware backends!
- func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listDirFn {
+ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool, keyFn list.KeyFn) listDirFn {
    ci := fs.GetConfig(ctx)
    fi := filter.GetConfig(ctx)
    if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and
        !(ci.NoTraverse && fi.HaveFilesFrom()) { // !(--files-from and --no-traverse)
        return func(dir string, callback fs.ListRCallback) (err error) {
            dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List
-           return list.DirSortedFn(dirCtx, f, includeAll, dir, callback, m.key)
+           return list.DirSortedFn(dirCtx, f, includeAll, dir, callback, keyFn)
        }
    }
@@ -137,7 +151,7 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listD
    // in syncing as it will use the first entry for the sync
    // comparison.
    slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
-       return cmp.Compare(m.key(a), m.key(b))
+       return cmp.Compare(keyFn(a), keyFn(b))
    })
    return callback(entries)
}
@@ -290,11 +304,11 @@ func (m *March) matchListings(srcChan, dstChan <-chan fs.DirEntry, srcOnly, dstO
    // Reload src and dst if needed - we set them to nil if used
    if src == nil {
        src = <-srcChan
-       srcName = m.key(src)
+       srcName = m.srcKey(src)
    }
    if dst == nil {
        dst = <-dstChan
-       dstName = m.key(dst)
+       dstName = m.dstKey(dst)
    }
    if src == nil && dst == nil {
        break
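`matchListings` is a two-channel merge: both channels are key-sorted, equal keys are a match, and otherwise the smaller side is "only" on that side. A self-contained sketch of the shape (strings stand in for `fs.DirEntry`, `ok` flags stand in for the nil-entry sentinel the real code uses):

```go
package main

import "fmt"

// merge walks two key-sorted channels in lockstep.
func merge(src, dst <-chan string) {
	s, sok := <-src
	d, dok := <-dst
	for sok || dok {
		switch {
		case sok && (!dok || s < d):
			fmt.Println("src only:", s)
			s, sok = <-src
		case dok && (!sok || d < s):
			fmt.Println("dst only:", d)
			d, dok = <-dst
		default: // keys equal: a match
			fmt.Println("match:", s)
			s, sok = <-src
			d, dok = <-dst
		}
	}
}

func main() {
	src, dst := make(chan string, 3), make(chan string, 3)
	for _, n := range []string{"a", "b", "d"} {
		src <- n
	}
	for _, n := range []string{"b", "c", "d"} {
		dst <- n
	}
	close(src)
	close(dst)
	merge(src, dst) // src only: a / match: b / dst only: c / match: d
}
```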
@@ -399,7 +413,7 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
    if m.NoTraverse && !m.NoCheckDest {
        originalSrcChan := srcChan
        srcChan = make(chan fs.DirEntry, 100)
-       ls, err := list.NewSorter(m.Ctx, m.Fdst, list.SortToChan(dstChan), m.key)
+       ls, err := list.NewSorter(m.Ctx, m.Fdst, list.SortToChan(dstChan), m.dstKey)
        if err != nil {
            return nil, err
        }
@@ -449,7 +463,6 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
            noDst: true,
        })
    }
-
}, func(dst fs.DirEntry) {
    recurse := m.Callback.DstOnly(dst)
    if recurse && job.dstDepth > 0 {
@@ -491,7 +491,11 @@ func TestMatchListings(t *testing.T) {
    // Make a channel to send the source (0) or dest (1) using a list.Sorter
    makeChan := func(offset int) <-chan fs.DirEntry {
        out := make(chan fs.DirEntry)
-       ls, err := list.NewSorter(ctx, nil, list.SortToChan(out), m.key)
+       key := m.dstKey
+       if offset == 0 {
+           key = m.srcKey
+       }
+       ls, err := list.NewSorter(ctx, nil, list.SortToChan(out), key)
        require.NoError(t, err)
        wg.Add(1)
        go func() {
@@ -21,6 +21,7 @@ import (
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/pacer"
+   "github.com/rclone/rclone/lib/transform"
)

// State of the copy
@@ -390,7 +391,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
    f:           f,
    dstFeatures: f.Features(),
    dst:         dst,
-   remote:      remote,
+   remote:      transform.Path(ctx, remote, false),
    src:         src,
    ci:          ci,
    tr:          tr,
@@ -399,7 +400,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
    }
    c.hashType, c.hashOption = CommonHash(ctx, f, src.Fs())
    if c.dst != nil {
-       c.remote = c.dst.Remote()
+       c.remote = transform.Path(ctx, c.dst.Remote(), false)
    }
    // Are we using partials?
    //
@@ -414,5 +415,5 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj

// CopyFile moves a single file possibly to a new name
func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
-   return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true)
+   return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true, false)
}
@@ -39,6 +39,7 @@ import (
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/random"
    "github.com/rclone/rclone/lib/readers"
+   "github.com/rclone/rclone/lib/transform"
    "golang.org/x/sync/errgroup"
    "golang.org/x/text/unicode/norm"
)
@@ -424,6 +425,8 @@ func MoveTransfer(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string,

// move - see Move for help
func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object, isTransfer bool) (newDst fs.Object, err error) {
+   origRemote := remote // avoid double-transform on fallback to copy
+   remote = transform.Path(ctx, remote, false)
    ci := fs.GetConfig(ctx)
    var tr *accounting.Transfer
    if isTransfer {
@@ -447,12 +450,14 @@ func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
    if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && (fdst.Features().ServerSideAcrossConfigs || ci.ServerSideAcrossConfigs))) {
        // Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
        if dst != nil {
-           remote = dst.Remote()
+           remote = transform.Path(ctx, dst.Remote(), false)
            if !SameObject(src, dst) {
                err = DeleteFile(ctx, dst)
                if err != nil {
                    return newDst, err
                }
            } else if src.Remote() == remote {
                return newDst, nil
            } else if needsMoveCaseInsensitive(fdst, fdst, remote, src.Remote(), false) {
                doMove = func(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
                    return MoveCaseInsensitive(ctx, fdst, fdst, remote, src.Remote(), false, src)
@@ -488,7 +493,7 @@ func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
        }
    }
    // Move not found or didn't work so copy dst <- src
-   newDst, err = Copy(ctx, fdst, dst, remote, src)
+   newDst, err = Copy(ctx, fdst, dst, origRemote, src)
    if err != nil {
        fs.Errorf(src, "Not deleting source as copy failed: %v", err)
        return newDst, err
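Why `origRemote` matters: `Copy` now applies `transform.Path` itself, and the transforms are not idempotent for things like prefixes, so transforming in `move()` and again in the `Copy` fallback would double the rewrite. A hedged illustration, assuming a configured `all,prefix=tic` transform:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/lib/transform"
)

func main() {
	ctx := context.Background()
	if err := transform.SetOptions(ctx, "all,prefix=tic"); err != nil {
		panic(err)
	}
	once := transform.Path(ctx, "toe/toe", false)
	twice := transform.Path(ctx, once, false)
	fmt.Println(once)  // tictoe/tictoe
	fmt.Println(twice) // tictictoe/tictictoe - the double-transform the fallback avoids
}
```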
@@ -516,24 +521,7 @@ func SuffixName(ctx context.Context, remote string) string {
        return remote
    }
    if ci.SuffixKeepExtension {
-       var (
-           base  = remote
-           exts  = ""
-           first = true
-           ext   = path.Ext(remote)
-       )
-       for ext != "" {
-           // Look second and subsequent extensions in mime types.
-           // If they aren't found then don't keep it as an extension.
-           if !first && mime.TypeByExtension(ext) == "" {
-               break
-           }
-           base = base[:len(base)-len(ext)]
-           exts = ext + exts
-           first = false
-           ext = path.Ext(base)
-       }
-       return base + ci.Suffix + exts
+       return transform.SuffixKeepExtension(remote, ci.Suffix)
    }
    return remote + ci.Suffix
}
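The inline loop moves into `transform.SuffixKeepExtension`. Per the deleted code, the suffix is inserted before the extension, and second and later extensions are kept together only when the mime database knows them — so the multi-extension result is system-dependent. A hedged sketch:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/transform"
)

func main() {
	fmt.Println(transform.SuffixKeepExtension("notes.txt", "-old")) // notes-old.txt
	// Prints "backup-old.tar.gz" on systems whose mime table registers
	// ".tar", and "backup.tar-old.gz" otherwise (only the last extension
	// is kept unconditionally).
	fmt.Println(transform.SuffixKeepExtension("backup.tar.gz", "-old"))
}
```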
@@ -1994,12 +1982,12 @@ func MoveCaseInsensitive(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileNam
}

// moveOrCopyFile moves or copies a single file possibly to a new name
- func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
+ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool, allowOverlap bool) (err error) {
    ci := fs.GetConfig(ctx)
    logger, usingLogger := GetLogger(ctx)
    dstFilePath := path.Join(fdst.Root(), dstFileName)
    srcFilePath := path.Join(fsrc.Root(), srcFileName)
-   if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
+   if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath && !allowOverlap {
        fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
        if usingLogger {
            srcObj, _ := fsrc.NewObject(ctx, srcFileName)
@@ -2106,7 +2094,14 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
//
// This is treated as a transfer.
func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
-   return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)
+   return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false, false)
}

+ // TransformFile transforms a file in place using --name-transform
+ //
+ // This is treated as a transfer.
+ func TransformFile(ctx context.Context, fdst fs.Fs, srcFileName string) (err error) {
+     return moveOrCopyFile(ctx, fdst, fdst, srcFileName, srcFileName, false, true)
+ }

// SetTier changes tier of object in remote
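A hedged usage sketch for the new `TransformFile` entry point: rename one file in place according to the configured transforms (it routes through `moveOrCopyFile` with `allowOverlap=true`, so src and dst being the same Fs is deliberate). `f` is assumed to be an `fs.Fs` opened elsewhere:

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/transform"
)

// renameInPlace strips a "tic" prefix from every path element of one file on f.
func renameInPlace(ctx context.Context, f fs.Fs) error {
	if err := transform.SetOptions(ctx, "all,trimprefix=tic"); err != nil {
		return err
	}
	return operations.TransformFile(ctx, f, "tictactoe/tictactoe.txt")
}
```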
@@ -2211,50 +2206,10 @@ func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {

// AddModTime adds file's Mod Time to output
func (l *ListFormat) AddModTime(timeFormat string) {
-   switch timeFormat {
-   case "":
+   if timeFormat == "" {
        timeFormat = "2006-01-02 15:04:05"
-   case "Layout":
-       timeFormat = time.Layout
-   case "ANSIC":
-       timeFormat = time.ANSIC
-   case "UnixDate":
-       timeFormat = time.UnixDate
-   case "RubyDate":
-       timeFormat = time.RubyDate
-   case "RFC822":
-       timeFormat = time.RFC822
-   case "RFC822Z":
-       timeFormat = time.RFC822Z
-   case "RFC850":
-       timeFormat = time.RFC850
-   case "RFC1123":
-       timeFormat = time.RFC1123
-   case "RFC1123Z":
-       timeFormat = time.RFC1123Z
-   case "RFC3339":
-       timeFormat = time.RFC3339
-   case "RFC3339Nano":
-       timeFormat = time.RFC3339Nano
-   case "Kitchen":
-       timeFormat = time.Kitchen
-   case "Stamp":
-       timeFormat = time.Stamp
-   case "StampMilli":
-       timeFormat = time.StampMilli
-   case "StampMicro":
-       timeFormat = time.StampMicro
-   case "StampNano":
-       timeFormat = time.StampNano
-   case "DateTime":
-       // timeFormat = time.DateTime // missing in go1.19
-       timeFormat = "2006-01-02 15:04:05"
-   case "DateOnly":
-       // timeFormat = time.DateOnly // missing in go1.19
-       timeFormat = "2006-01-02"
-   case "TimeOnly":
-       // timeFormat = time.TimeOnly // missing in go1.19
-       timeFormat = "15:04:05"
+   } else {
+       timeFormat = transform.TimeFormat(timeFormat)
    }
    l.AppendOutput(func(entry *ListJSONItem) string {
        return entry.ModTime.When.Local().Format(timeFormat)
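The big switch collapses into `transform.TimeFormat`. A hedged sketch of the contract — the assumption, based on the replaced switch, is that it maps names like `"RFC3339"` to the corresponding Go layout strings:

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/transform"
)

func main() {
	layout := transform.TimeFormat("RFC3339") // assumed to equal time.RFC3339
	fmt.Println(time.Date(2025, 4, 1, 12, 0, 0, 0, time.UTC).Format(layout))
}
```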
@@ -62,7 +62,7 @@ func rcList(ctx context.Context, in rc.Params) (out rc.Params, err error) {
    if rc.NotErrParamNotFound(err) {
        return nil, err
    }
-   var list = []*ListJSONItem{}
+   list := []*ListJSONItem{}
    err = ListJSON(ctx, f, remote, &opt, func(item *ListJSONItem) error {
        list = append(list, item)
        return nil
@@ -193,7 +193,7 @@ func rcMoveOrCopyFile(ctx context.Context, in rc.Params, cp bool) (out rc.Params
    if err != nil {
        return nil, err
    }
-   return nil, moveOrCopyFile(ctx, dstFs, srcFs, dstRemote, srcRemote, cp)
+   return nil, moveOrCopyFile(ctx, dstFs, srcFs, dstRemote, srcRemote, cp, false)
}

func init() {
@@ -289,7 +289,6 @@ func rcSingleCommand(ctx context.Context, in rc.Params, name string, noRemote bo

    var request *http.Request
    request, err := in.GetHTTPRequest()
-
    if err != nil {
        return nil, err
    }
@@ -629,12 +628,12 @@ func rcBackend(ctx context.Context, in rc.Params) (out rc.Params, err error) {
    if err != nil {
        return nil, err
    }
-   var opt = map[string]string{}
+   opt := map[string]string{}
    err = in.GetStructMissingOK("opt", &opt)
    if err != nil {
        return nil, err
    }
-   var arg = []string{}
+   arg := []string{}
    err = in.GetStructMissingOK("arg", &arg)
    if err != nil {
        return nil, err
@@ -642,7 +641,6 @@ func rcBackend(ctx context.Context, in rc.Params) (out rc.Params, err error) {
    result, err := doCommand(ctx, command, arg, opt)
    if err != nil {
        return nil, fmt.Errorf("command %q failed: %w", command, err)
-
    }
    out = make(rc.Params)
    out["result"] = result
@@ -685,7 +683,6 @@ func rcDu(ctx context.Context, in rc.Params) (out rc.Params, err error) {
    dir, err := in.GetString("dir")
    if rc.IsErrParamNotFound(err) {
        dir = config.GetCacheDir()
-
    } else if err != nil {
        return nil, err
    }
@@ -20,6 +20,7 @@ import (
    "github.com/rclone/rclone/fs/march"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/lib/errcount"
+   "github.com/rclone/rclone/lib/transform"
    "golang.org/x/sync/errgroup"
)
@@ -95,6 +96,7 @@ type syncCopyMove struct {
    setDirModTimes         []setDirModTime     // directories that need their modtime set
    setDirModTimesMaxLevel int                 // max level of the directories to set
    modifiedDirs           map[string]struct{} // dirs with changed contents (if s.setDirModTimeAfter)
+   allowOverlap           bool                // whether we allow src and dst to overlap (i.e. for convmv)
}

// For keeping track of delayed modtime sets
@@ -126,8 +128,8 @@ func (strategy trackRenamesStrategy) leaf() bool {
    return (strategy & trackRenamesStrategyLeaf) != 0
}

- func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
-     if (deleteMode != fs.DeleteModeOff || DoMove) && operations.OverlappingFilterCheck(ctx, fdst, fsrc) {
+ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool, allowOverlap bool) (*syncCopyMove, error) {
+     if (deleteMode != fs.DeleteModeOff || DoMove) && operations.OverlappingFilterCheck(ctx, fdst, fsrc) && !allowOverlap {
        return nil, fserrors.FatalError(fs.ErrorOverlapping)
    }
    ci := fs.GetConfig(ctx)
@@ -161,6 +163,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
    setDirModTime:      (!ci.NoUpdateDirModTime && fsrc.Features().CanHaveEmptyDirectories) && (fdst.Features().WriteDirSetModTime || fdst.Features().MkdirMetadata != nil || fdst.Features().DirSetModTime != nil),
    setDirModTimeAfter: !ci.NoUpdateDirModTime && (!copyEmptySrcDirs || fsrc.Features().CanHaveEmptyDirectories && fdst.Features().DirModTimeUpdatesOnWrite),
    modifiedDirs:       make(map[string]struct{}),
+   allowOverlap:       allowOverlap,
}

s.logger, s.usingLogger = operations.GetLogger(ctx)
@@ -922,7 +925,7 @@ func (s *syncCopyMove) tryRename(src fs.Object) bool {
//
// dir is the start directory, "" for root
func (s *syncCopyMove) run() error {
-   if operations.Same(s.fdst, s.fsrc) {
+   if operations.Same(s.fdst, s.fsrc) && !s.allowOverlap {
        fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
        return nil
    }
@@ -1122,6 +1125,9 @@ func (s *syncCopyMove) copyDirMetadata(ctx context.Context, f fs.Fs, dst fs.Dire
        newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
    }
}
+ if transform.Transforming(ctx) && newDst != nil && src.Remote() != newDst.Remote() {
+     s.markParentNotEmpty(src)
+ }
// If we need to set modtime after and we created a dir, then save it for later
if s.setDirModTime && s.setDirModTimeAfter && err == nil {
    if newDst != nil {
@@ -1254,8 +1260,8 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
    s.logger(s.ctx, operations.MissingOnDst, src, nil, fs.ErrorIsDir)

    // Create the directory and make sure the Metadata/ModTime is correct
-   s.copyDirMetadata(s.ctx, s.fdst, nil, x.Remote(), x)
-   s.markDirModified(x.Remote())
+   s.copyDirMetadata(s.ctx, s.fdst, nil, transform.Path(s.ctx, x.Remote(), true), x)
+   s.markDirModified(transform.Path(s.ctx, x.Remote(), true))
    return true
default:
    panic("Bad object in DirEntries")
@@ -1288,7 +1294,11 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse
    }
case fs.Directory:
    // Do the same thing to the entire contents of the directory
-   s.markParentNotEmpty(src)
+   srcX = fs.NewOverrideDirectory(srcX, transform.Path(ctx, src.Remote(), true))
+   src = srcX
+   if !transform.Transforming(ctx) || src.Remote() != dst.Remote() {
+       s.markParentNotEmpty(src)
+   }
    dstX, ok := dst.(fs.Directory)
    if ok {
        s.logger(s.ctx, operations.Match, src, dst, fs.ErrorIsDir)
@@ -1327,7 +1337,7 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse
// If DoMove is true then files will be moved instead of copied.
//
// dir is the start directory, "" for root
- func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
+ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool, allowOverlap bool) error {
    ci := fs.GetConfig(ctx)
    if deleteMode != fs.DeleteModeOff && DoMove {
        return fserrors.FatalError(errors.New("can't delete and move at the same time"))
@@ -1338,7 +1348,7 @@ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
    return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
}
// only delete stuff during this pass
- do, err := newSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
+ do, err := newSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs, allowOverlap)
if err != nil {
    return err
}
@@ -1349,7 +1359,7 @@ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
    // Next pass does a copy only
    deleteMode = fs.DeleteModeOff
}
- do, err := newSyncCopyMove(ctx, fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
+ do, err := newSyncCopyMove(ctx, fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs, allowOverlap)
if err != nil {
    return err
}
@@ -1359,17 +1369,22 @@ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
// Sync fsrc into fdst
func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
    ci := fs.GetConfig(ctx)
-   return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs)
+   return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs, false)
}

// CopyDir copies fsrc into fdst
func CopyDir(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
-   return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
+   return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs, false)
}

// moveDir moves fsrc into fdst
func moveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
-   return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
+   return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs, false)
}

+ // Transform renames fdst in place
+ func Transform(ctx context.Context, fdst fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
+     return runSyncCopyMove(ctx, fdst, fdst, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs, true)
+ }

// MoveDir moves fsrc into fdst
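A hedged usage sketch for the new `Transform` entry point above: because src and dst are the same Fs, the operation is only legal with `allowOverlap=true`, which is exactly what `Transform` passes down:

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/sync"
	"github.com/rclone/rclone/lib/transform"
)

// renameAll renames everything on f in place according to the
// configured transforms.
func renameAll(ctx context.Context, f fs.Fs) error {
	if err := transform.SetOptions(ctx, "all,lowercase"); err != nil {
		return err
	}
	// deleteEmptySrcDirs=true, copyEmptySrcDirs=true
	return sync.Transform(ctx, f, true, true)
}
```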
@@ -27,6 +27,7 @@ import (
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest"
+   "github.com/rclone/rclone/lib/transform"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/text/unicode/norm"
@@ -2980,7 +2981,7 @@ func predictDstFromLogger(ctx context.Context) context.Context {
        if winner.Err != nil {
            errMsg = ";" + winner.Err.Error()
        }
-       operations.SyncFprintf(opt.JSON, "%s;%s;%v;%s%s\n", file.ModTime(ctx).Local().Format(timeFormat), checksum, file.Size(), file.Remote(), errMsg)
+       operations.SyncFprintf(opt.JSON, "%s;%s;%v;%s%s\n", file.ModTime(ctx).Local().Format(timeFormat), checksum, file.Size(), transform.Path(ctx, file.Remote(), false), errMsg) // TODO: should the transform be handled in the sync instead of here?
    }
}
return operations.WithSyncLogger(ctx, opt)
483  fs/sync/sync_transform_test.go  Normal file
@@ -0,0 +1,483 @@
// Test transform

package sync

import (
    "cmp"
    "context"
    "fmt"
    "path"
    "slices"
    "strings"
    "testing"

    _ "github.com/rclone/rclone/backend/all"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/filter"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/lib/transform"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "golang.org/x/text/unicode/norm"
)

var debug = ``

func TestTransform(t *testing.T) {
    type args struct {
        TransformOpt     []string
        TransformBackOpt []string
        Lossless         bool // whether the TransformBackAlgo is always losslessly invertible
    }
    tests := []struct {
        name string
        args args
    }{
        {name: "NFC", args: args{
            TransformOpt:     []string{"nfc"},
            TransformBackOpt: []string{"nfd"},
            Lossless:         false,
        }},
        {name: "NFD", args: args{
            TransformOpt:     []string{"nfd"},
            TransformBackOpt: []string{"nfc"},
            Lossless:         false,
        }},
        {name: "base64", args: args{
            TransformOpt:     []string{"base64encode"},
            TransformBackOpt: []string{"base64encode"},
            Lossless:         false,
        }},
        {name: "prefix", args: args{
            TransformOpt:     []string{"prefix=PREFIX"},
            TransformBackOpt: []string{"trimprefix=PREFIX"},
            Lossless:         true,
        }},
        {name: "suffix", args: args{
            TransformOpt:     []string{"suffix=SUFFIX"},
            TransformBackOpt: []string{"trimsuffix=SUFFIX"},
            Lossless:         true,
        }},
        {name: "truncate", args: args{
            TransformOpt:     []string{"truncate=10"},
            TransformBackOpt: []string{"truncate=10"},
            Lossless:         false,
        }},
        {name: "encoder", args: args{
            TransformOpt:     []string{"encoder=Colon,SquareBracket"},
            TransformBackOpt: []string{"decoder=Colon,SquareBracket"},
            Lossless:         true,
        }},
        {name: "ISO-8859-1", args: args{
            TransformOpt:     []string{"ISO-8859-1"},
            TransformBackOpt: []string{"ISO-8859-1"},
            Lossless:         false,
        }},
        {name: "charmap", args: args{
            TransformOpt:     []string{"all,charmap=ISO-8859-7"},
            TransformBackOpt: []string{"all,charmap=ISO-8859-7"},
            Lossless:         false,
        }},
        {name: "lowercase", args: args{
            TransformOpt:     []string{"all,lowercase"},
            TransformBackOpt: []string{"all,lowercase"},
            Lossless:         false,
        }},
        {name: "ascii", args: args{
            TransformOpt:     []string{"all,ascii"},
            TransformBackOpt: []string{"all,ascii"},
            Lossless:         false,
        }},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            r := fstest.NewRun(t)
            defer r.Finalise()

            ctx := context.Background()
            r.Mkdir(ctx, r.Flocal)
            r.Mkdir(ctx, r.Fremote)
            items := makeTestFiles(t, r, "dir1")
            deleteDSStore(t, r)
            r.CheckRemoteListing(t, items, nil)
            r.CheckLocalListing(t, items, nil)

            err := transform.SetOptions(ctx, tt.args.TransformOpt...)
            require.NoError(t, err)

            err = Sync(ctx, r.Fremote, r.Flocal, true)
            assert.NoError(t, err)
            compareNames(ctx, t, r, items)

            err = transform.SetOptions(ctx, tt.args.TransformBackOpt...)
            require.NoError(t, err)
            err = Sync(ctx, r.Fremote, r.Flocal, true)
            assert.NoError(t, err)
            compareNames(ctx, t, r, items)

            if tt.args.Lossless {
                deleteDSStore(t, r)
                r.CheckRemoteItems(t, items...)
            }
        })
    }
}
const alphabet = "abcdefg123456789"

var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"}

func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
    t.Helper()
    n := 0
    // Create test files
    items := []fstest.Item{}
    for _, c := range alphabet {
        var out strings.Builder
        for i := rune(0); i < 7; i++ {
            out.WriteRune(c + i)
        }
        fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
        fileName = strings.ToValidUTF8(fileName, "")
        fileName = strings.NewReplacer(":", "", "<", "", ">", "", "?", "").Replace(fileName) // remove characters illegal on windows

        if debug != "" {
            fileName = debug
        }

        item := r.WriteObject(context.Background(), fileName, fileName, t1)
        r.WriteFile(fileName, fileName, t1)
        items = append(items, item)
        n++

        if debug != "" {
            break
        }
    }

    for _, extra := range extras {
        item := r.WriteObject(context.Background(), extra, extra, t1)
        r.WriteFile(extra, extra, t1)
        items = append(items, item)
    }

    return items
}
func deleteDSStore(t *testing.T, r *fstest.Run) {
    ctxDSStore, fi := filter.AddConfig(context.Background())
    err := fi.AddRule(`+ *.DS_Store`)
    assert.NoError(t, err)
    err = fi.AddRule(`- **`)
    assert.NoError(t, err)
    err = operations.Delete(ctxDSStore, r.Fremote)
    assert.NoError(t, err)
}
func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fstest.Item) {
    var entries fs.DirEntries

    deleteDSStore(t, r)
    err := walk.ListR(context.Background(), r.Fremote, "", true, -1, walk.ListObjects, func(e fs.DirEntries) error {
        entries = append(entries, e...)
        return nil
    })
    assert.NoError(t, err)
    entries = slices.DeleteFunc(entries, func(E fs.DirEntry) bool { // remove those pesky .DS_Store files
        if strings.Contains(E.Remote(), ".DS_Store") {
            err := operations.DeleteFile(context.Background(), E.(fs.Object))
            assert.NoError(t, err)
            return true
        }
        return false
    })
    require.Equal(t, len(items), entries.Len())

    // sort by CONVERTED name
    slices.SortStableFunc(items, func(a, b fstest.Item) int {
        aConv := transform.Path(ctx, a.Path, false)
        bConv := transform.Path(ctx, b.Path, false)
        return cmp.Compare(aConv, bConv)
    })
    slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
        return cmp.Compare(a.Remote(), b.Remote())
    })

    for i, e := range entries {
        expect := transform.Path(ctx, items[i].Path, false)
        msg := fmt.Sprintf("expected %v, got %v", detectEncoding(expect), detectEncoding(e.Remote()))
        assert.Equal(t, expect, e.Remote(), msg)
    }
}
func detectEncoding(s string) string {
    if norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
        return "BOTH"
    }
    if !norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
        return "NFD"
    }
    if norm.NFC.IsNormalString(s) && !norm.NFD.IsNormalString(s) {
        return "NFC"
    }
    return "OTHER"
}
func TestTransformCopy(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "all,suffix_keep_extension=_somesuffix")
    require.NoError(t, err)
    file1 := r.WriteFile("sub dir/hello world.txt", "hello world", t1)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("sub dir_somesuffix/hello world_somesuffix.txt", "hello world", t1))
}
func TestDoubleTransform(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "all,prefix=tac", "all,prefix=tic")
    require.NoError(t, err)
    file1 := r.WriteFile("toe/toe", "hello world", t1)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("tictactoe/tictactoe", "hello world", t1))
}

func TestFileTag(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "file,prefix=tac", "file,prefix=tic")
    require.NoError(t, err)
    file1 := r.WriteFile("toe/toe/toe", "hello world", t1)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("toe/toe/tictactoe", "hello world", t1))
}

func TestNoTag(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "prefix=tac", "prefix=tic")
    require.NoError(t, err)
    file1 := r.WriteFile("toe/toe/toe", "hello world", t1)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("toe/toe/tictactoe", "hello world", t1))
}

func TestDirTag(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "dir,prefix=tac", "dir,prefix=tic")
    require.NoError(t, err)
    r.WriteFile("toe/toe/toe.txt", "hello world", t1)
    _, err = operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t1)
    require.NoError(t, err)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalListing(t, []fstest.Item{fstest.NewItem("toe/toe/toe.txt", "hello world", t1)}, []string{"empty_dir", "toe", "toe/toe"})
    r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("tictactoe/tictactoe/toe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe"})
}

func TestAllTag(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "all,prefix=tac", "all,prefix=tic")
    require.NoError(t, err)
    r.WriteFile("toe/toe/toe.txt", "hello world", t1)
    _, err = operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t1)
    require.NoError(t, err)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalListing(t, []fstest.Item{fstest.NewItem("toe/toe/toe.txt", "hello world", t1)}, []string{"empty_dir", "toe", "toe/toe"})
    r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("tictactoe/tictactoe/tictactoe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe"})
    err = operations.Check(ctx, &operations.CheckOpt{Fsrc: r.Flocal, Fdst: r.Fremote}) // should not error even though dst has transformed names
    assert.NoError(t, err)
}
func TestRunTwice(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "dir,prefix=tac", "dir,prefix=tic")
    require.NoError(t, err)
    file1 := r.WriteFile("toe/toe/toe.txt", "hello world", t1)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("tictactoe/tictactoe/toe.txt", "hello world", t1))

    // result should not change second time, since src is unchanged
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("tictactoe/tictactoe/toe.txt", "hello world", t1))
}

func TestSyntax(t *testing.T) {
    ctx := context.Background()
    err := transform.SetOptions(ctx, "prefix")
    assert.Error(t, err) // should error as required value is missing

    err = transform.SetOptions(ctx, "banana")
    assert.Error(t, err) // should error as unrecognized option

    err = transform.SetOptions(ctx, "=123")
    assert.Error(t, err) // should error as required key is missing

    err = transform.SetOptions(ctx, "prefix=123")
    assert.NoError(t, err) // should not error
}

func TestConflicting(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "prefix=tac", "trimprefix=tac")
    require.NoError(t, err)
    file1 := r.WriteFile("toe/toe/toe", "hello world", t1)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    // should result in no change as prefix and trimprefix cancel out
    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("toe/toe/toe", "hello world", t1))
}
func TestMove(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "all,prefix=tac", "all,prefix=tic")
    require.NoError(t, err)
    r.WriteFile("toe/toe/toe.txt", "hello world", t1)
    _, err = operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t1)
    require.NoError(t, err)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = MoveDir(ctx, r.Fremote, r.Flocal, true, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalListing(t, []fstest.Item{}, []string{})
    r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("tictactoe/tictactoe/tictactoe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe"})
}

func TestTransformFile(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "all,prefix=tac", "all,prefix=tic")
    require.NoError(t, err)
    r.WriteFile("toe/toe/toe.txt", "hello world", t1)
    _, err = operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t1)
    require.NoError(t, err)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = MoveDir(ctx, r.Fremote, r.Flocal, true, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalListing(t, []fstest.Item{}, []string{})
    r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("tictactoe/tictactoe/tictactoe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe"})

    err = transform.SetOptions(ctx, "all,trimprefix=tic", "all,trimprefix=tac")
    require.NoError(t, err)
    err = operations.TransformFile(ctx, r.Fremote, "tictactoe/tictactoe/tictactoe.txt")
    require.NoError(t, err)
    r.CheckLocalListing(t, []fstest.Item{}, []string{})
    r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("toe/toe/toe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe", "toe", "toe/toe"})
}
func TestBase64(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "all,base64encode")
    require.NoError(t, err)
    file1 := r.WriteFile("toe/toe/toe.txt", "hello world", t1)

    r.Mkdir(ctx, r.Fremote)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("dG9l/dG9l/dG9lLnR4dA==", "hello world", t1))

    // round trip
    err = transform.SetOptions(ctx, "all,base64decode")
    require.NoError(t, err)
    ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Flocal, r.Fremote, true)
    testLoggerVsLsf(ctx, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
    require.NoError(t, err)

    r.CheckLocalItems(t, file1)
    r.CheckRemoteItems(t, fstest.NewItem("dG9l/dG9l/dG9lLnR4dA==", "hello world", t1))
}

func TestError(t *testing.T) {
    ctx := context.Background()
    r := fstest.NewRun(t)
    err := transform.SetOptions(ctx, "all,prefix=ta/c") // has illegal character
    require.NoError(t, err)
    file1 := r.WriteFile("toe/toe/toe", "hello world", t1)

    r.Mkdir(ctx, r.Fremote)
    // ctx = predictDstFromLogger(ctx)
    err = Sync(ctx, r.Fremote, r.Flocal, true)
    // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
    assert.Error(t, err)

    r.CheckLocalListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
    r.CheckRemoteListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
}
@@ -399,6 +399,12 @@ backends:
      - TestIntegration/FsMkdir/FsEncoding/punctuation
      - TestIntegration/FsMkdir/FsEncoding/invalid_UTF-8
    fastlist: false
+ - backend: "webdav"
+   remote: "TestWebdavInfiniteScale:"
+   ignore:
+     - TestIntegration/FsMkdir/FsEncoding/punctuation
+     - TestIntegration/FsMkdir/FsEncoding/invalid_UTF-8
+   fastlist: false
  - backend: "webdav"
    remote: "TestWebdavRclone:"
    ignore:
49  fstest/testserver/init.d/TestWebdavInfiniteScale  Executable file
@@ -0,0 +1,49 @@
#!/usr/bin/env bash

set -e

NAME=infinitescale
USER=admin
PASS=admin
PORT=9200

. $(dirname "$0")/docker.bash

start() {

    docker run --rm --name $NAME \
        -v $(pwd):/etc/ocis \
        -e "OCIS_INSECURE=true" \
        -e "IDM_ADMIN_PASSWORD=$PASS" \
        -e "OCIS_FORCE_CONFIG_OVERWRITE=true" \
        -e "OCIS_URL=https://127.0.0.1:$PORT" \
        owncloud/ocis \
        init

    docker run --rm -d --name $NAME \
        -e "OCIS_LOG_LEVEL=debug" \
        -e "OCIS_LOG_PRETTY=true" \
        -e "OCIS_URL=https://127.0.0.1:$PORT" \
        -e "OCIS_ADMIN_USER_ID=some-admin-user-id-0000-100000000000" \
        -e "IDM_ADMIN_PASSWORD=$PASS" \
        -e "OCIS_INSECURE=true" \
        -e "PROXY_ENABLE_BASIC_AUTH=true" \
        -v $(pwd):/etc/ocis \
        -p 127.0.0.1:${PORT}:9200 \
        owncloud/ocis

    echo type=webdav
    echo url=https://127.0.0.1:${PORT}/dav/spaces/some-admin-user-id-0000-100000000000
    echo user=$USER
    echo pass=$(rclone obscure $PASS)
    echo vendor=infinitescale
    echo _connect=127.0.0.1:${PORT}
}

stop() {
    # Clean up the mess
    docker stop infinitescale
    rm -f ./ocis.yaml
}

. $(dirname "$0")/run.bash
12  go.mod
@@ -81,12 +81,12 @@ require (
    github.com/zeebo/blake3 v0.2.4
    go.etcd.io/bbolt v1.4.0
    goftp.io/server/v2 v2.0.1
-   golang.org/x/crypto v0.35.0
-   golang.org/x/net v0.36.0
+   golang.org/x/crypto v0.36.0
+   golang.org/x/net v0.38.0
    golang.org/x/oauth2 v0.27.0
-   golang.org/x/sync v0.11.0
-   golang.org/x/sys v0.30.0
-   golang.org/x/text v0.22.0
+   golang.org/x/sync v0.12.0
+   golang.org/x/sys v0.31.0
+   golang.org/x/text v0.23.0
    golang.org/x/time v0.10.0
    google.golang.org/api v0.223.0
    gopkg.in/validator.v2 v2.0.1

@@ -243,5 +243,5 @@ require (
    github.com/golang-jwt/jwt/v4 v4.5.2
    github.com/pkg/xattr v0.4.10
    golang.org/x/mobile v0.0.0-20250218173827-cd096645fcd3
-   golang.org/x/term v0.29.0
+   golang.org/x/term v0.30.0
)
24  go.sum
@@ -703,8 +703,8 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
- golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
- golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
+ golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+ golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=

@@ -789,8 +789,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
- golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
- golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
+ golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
+ golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -814,8 +814,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
- golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
- golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+ golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+ golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -870,8 +870,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
- golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
- golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+ golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+ golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

@@ -887,8 +887,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
- golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
- golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
+ golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+ golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -904,8 +904,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
- golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
- golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+ golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+ golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -151,8 +151,8 @@ func init() {
    alias("Dot", EncodeDot)
}

- // validStrings returns all the valid MultiEncoder strings
- func validStrings() string {
+ // ValidStrings returns all the valid MultiEncoder strings
+ func ValidStrings() string {
    var out []string
    for k := range nameToEncoding {
        out = append(out, k)

@@ -192,7 +192,7 @@ func (mask *MultiEncoder) Set(in string) error {
    } else {
        i, err := strconv.ParseUint(part, 0, 0)
        if err != nil {
-           return fmt.Errorf("bad encoding %q: possible values are: %s", part, validStrings())
+           return fmt.Errorf("bad encoding %q: possible values are: %s", part, ValidStrings())
        }
        out |= MultiEncoder(i)
    }
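A hedged sketch of the renamed API from a caller's side, using only the `Set`/`Encode` signatures visible in this diff (the import path assumes the usual `lib/encoder` location):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	var enc encoder.MultiEncoder
	if err := enc.Set("Colon,SquareBracket"); err != nil {
		// the error text now embeds the exported ValidStrings()
		fmt.Println(err)
		return
	}
	fmt.Println(enc.Encode("a:b")) // ':' becomes its fullwidth lookalike '：'
}
```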
@@ -313,8 +313,7 @@ func (mask MultiEncoder) Encode(in string) string {
    }
    if mask.Has(EncodeAsterisk) { // *
        switch r {
-       case '*',
-           '＊':
+       case '*', '＊':
            return true
        }
    }
@@ -346,64 +345,55 @@ func (mask MultiEncoder) Encode(in string) string {
    }
    if mask.Has(EncodeQuestion) { // ?
        switch r {
-       case '?',
-           '？':
+       case '?', '？':
            return true
        }
    }
    if mask.Has(EncodeColon) { // :
        switch r {
-       case ':',
-           '：':
+       case ':', '：':
            return true
        }
    }
    if mask.Has(EncodePipe) { // |
        switch r {
-       case '|',
-           '｜':
+       case '|', '｜':
            return true
        }
    }
    if mask.Has(EncodeDoubleQuote) { // "
        switch r {
-       case '"',
-           '＂':
+       case '"', '＂':
            return true
        }
    }
    if mask.Has(EncodeSingleQuote) { // '
        switch r {
-       case '\'',
-           '＇':
+       case '\'', '＇':
            return true
        }
    }
    if mask.Has(EncodeBackQuote) { // `
        switch r {
-       case '`',
-           '｀':
+       case '`', '｀':
            return true
        }
    }
    if mask.Has(EncodeDollar) { // $
        switch r {
-       case '$',
-           '＄':
+       case '$', '＄':
            return true
        }
    }
    if mask.Has(EncodeSlash) { // /
        switch r {
-       case '/',
-           '／':
+       case '/', '／':
            return true
        }
    }
    if mask.Has(EncodeBackSlash) { // \
        switch r {
-       case '\\',
-           '＼':
+       case '\\', '＼':
            return true
        }
    }

@@ -416,15 +406,13 @@ func (mask MultiEncoder) Encode(in string) string {
    }
    if mask.Has(EncodeHash) { // #
        switch r {
-       case '#',
-           '＃':
+       case '#', '＃':
            return true
        }
    }
    if mask.Has(EncodePercent) { // %
        switch r {
-       case '%',
-           '％':
+       case '%', '％':
            return true
        }
    }
@@ -1182,6 +1170,7 @@ func appendQuotedBytes(w io.Writer, s string) {
        _, _ = fmt.Fprintf(w, string(QuoteRune)+"%02X", b)
    }
}
+
func appendUnquotedByte(w io.Writer, s string) bool {
    if len(s) < 2 {
        return false
@@ -1202,12 +1191,15 @@ func (identity) Decode(in string) string { return in }
|
||||
func (i identity) FromStandardPath(s string) string {
|
||||
return FromStandardPath(i, s)
|
||||
}
|
||||
|
||||
func (i identity) FromStandardName(s string) string {
|
||||
return FromStandardName(i, s)
|
||||
}
|
||||
|
||||
func (i identity) ToStandardPath(s string) string {
|
||||
return ToStandardPath(i, s)
|
||||
}
|
||||
|
||||
func (i identity) ToStandardName(s string) string {
|
||||
return ToStandardName(i, s)
|
||||
}
|
||||
|
||||
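For orientation, here is a minimal sketch (not part of the diff) of how the newly exported `ValidStrings` and the collapsed `case` lists above are exercised. The encoding names come from the `convmv` examples later in this diff; the printed output is what the fullwidth mapping should produce:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	var enc encoder.MultiEncoder
	// Set parses a comma-separated list of encoding names; an unknown
	// name now reports the exported ValidStrings() in the error.
	if err := enc.Set("Colon,SquareBracket"); err != nil {
		fmt.Println("bad encoding:", err)
		return
	}
	s := enc.Encode("a:file[draft].txt") // maps ':' '[' ']' to fullwidth forms
	fmt.Println(s)                       // should print a：file［draft］.txt
	fmt.Println(enc.Decode(s))           // round-trips back to the original
}
```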
75
lib/proxy/http.go
Normal file
@@ -0,0 +1,75 @@
package proxy

import (
	"bufio"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"strings"

	"golang.org/x/net/proxy"
)

// HTTPConnectDial connects using HTTP CONNECT via proxyDialer
//
// It will read the HTTP proxy address from the environment in the
// standard way.
//
// It optionally takes a proxyDialer to dial the HTTP proxy server.
// If nil is passed, it will use the default net.Dialer.
func HTTPConnectDial(network, addr string, proxyURL *url.URL, proxyDialer proxy.Dialer) (net.Conn, error) {
	if proxyDialer == nil {
		proxyDialer = &net.Dialer{}
	}
	if proxyURL == nil {
		return proxyDialer.Dial(network, addr)
	}

	// prepare proxy host with default ports
	host := proxyURL.Host
	if !strings.Contains(host, ":") {
		if strings.EqualFold(proxyURL.Scheme, "https") {
			host += ":443"
		} else {
			host += ":80"
		}
	}

	// connect to proxy
	conn, err := proxyDialer.Dial(network, host)
	if err != nil {
		return nil, fmt.Errorf("HTTP CONNECT proxy failed to Dial: %q", err)
	}

	// wrap TLS if HTTPS proxy
	if strings.EqualFold(proxyURL.Scheme, "https") {
		tlsConfig := &tls.Config{ServerName: proxyURL.Hostname()}
		tlsConn := tls.Client(conn, tlsConfig)
		if err := tlsConn.Handshake(); err != nil {
			_ = conn.Close()
			return nil, fmt.Errorf("HTTP CONNECT proxy failed to make TLS connection: %q", err)
		}
		conn = tlsConn
	}

	// send CONNECT
	_, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
	if err != nil {
		_ = conn.Close()
		return nil, fmt.Errorf("HTTP CONNECT proxy failed to send CONNECT: %q", err)
	}
	br := bufio.NewReader(conn)
	req := &http.Request{URL: &url.URL{Scheme: "http", Host: addr}}
	resp, err := http.ReadResponse(br, req)
	if err != nil {
		_ = conn.Close()
		return nil, fmt.Errorf("HTTP CONNECT proxy failed to read response: %q", err)
	}
	if resp.StatusCode != http.StatusOK {
		_ = conn.Close()
		return nil, fmt.Errorf("HTTP CONNECT proxy failed: %s", resp.Status)
	}
	return conn, nil
}
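A short usage sketch for the new helper (not part of the diff); the proxy address and target host are placeholders, and passing nil for proxyDialer falls back to a plain net.Dialer as shown above:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/rclone/rclone/lib/proxy"
)

func main() {
	// hypothetical HTTP proxy; an https:// scheme would get the TLS wrap
	proxyURL, err := url.Parse("http://proxy.example.com:3128")
	if err != nil {
		panic(err)
	}
	conn, err := proxy.HTTPConnectDial("tcp", "example.com:443", proxyURL, nil)
	if err != nil {
		fmt.Println("CONNECT failed:", err)
		return
	}
	defer func() { _ = conn.Close() }()
	// conn is now a raw tunnel to example.com:443; speak TLS/HTTP over it
	fmt.Println("connected via", conn.RemoteAddr())
}
```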
71
lib/transform/cmap.go
Normal file
@@ -0,0 +1,71 @@
package transform

import (
	"fmt"
	"strings"
	"sync"

	"github.com/rclone/rclone/fs"
	"golang.org/x/text/encoding/charmap"
)

var (
	cmaps = map[int]*charmap.Charmap{}
	lock  sync.Mutex
)

type cmapChoices struct{}

func (cmapChoices) Choices() []string {
	choices := make([]string, 1)
	i := 0
	for _, enc := range charmap.All {
		c, ok := enc.(*charmap.Charmap)
		if !ok {
			continue
		}
		name := strings.ReplaceAll(c.String(), " ", "-")
		if name == "" {
			name = fmt.Sprintf("unknown-%d", i)
		}
		lock.Lock()
		cmaps[i] = c
		lock.Unlock()
		choices = append(choices, name)
		i++
	}
	return choices
}

func (cmapChoices) Type() string {
	return "string"
}

func charmapByID(cm fs.Enum[cmapChoices]) *charmap.Charmap {
	lock.Lock()
	c, ok := cmaps[int(cm)]
	lock.Unlock()
	if ok {
		return c
	}
	return nil
}

func encodeWithReplacement(s string, cmap *charmap.Charmap) string {
	return strings.Map(func(r rune) rune {
		b, ok := cmap.EncodeRune(r)
		if !ok {
			return '_'
		}
		return cmap.DecodeByte(b)
	}, s)
}

func toASCII(s string) string {
	return strings.Map(func(r rune) rune {
		if r <= 127 {
			return r
		}
		return -1
	}, s)
}
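The replacement encoder above maps every rune through the charmap and substitutes `_` for anything the target encoding cannot represent. A self-contained sketch of the same technique, using `x/text` directly (the ISO-8859-7 choice and sample string mirror the help examples below):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/encoding/charmap"
)

func main() {
	cmap := charmap.ISO8859_7 // Greek; no mapping for 'é' or the emoji
	out := strings.Map(func(r rune) rune {
		b, ok := cmap.EncodeRune(r)
		if !ok {
			return '_' // unmappable rune: replace, as encodeWithReplacement does
		}
		return cmap.DecodeByte(b)
	}, "Café 🦊")
	fmt.Println(out) // Caf_ _
}
```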
136
lib/transform/help.go
Normal file
@@ -0,0 +1,136 @@
package transform

import (
	"context"
	"fmt"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/encoder"
)

type commands struct {
	command     string
	description string
}

type example struct {
	path  string
	flags []string
}

var commandList = []commands{
	{command: "--name-transform prefix=XXXX", description: "Prepends XXXX to the file name."},
	{command: "--name-transform suffix=XXXX", description: "Appends XXXX to the file name after the extension."},
	{command: "--name-transform suffix_keep_extension=XXXX", description: "Appends XXXX to the file name while preserving the original file extension."},
	{command: "--name-transform trimprefix=XXXX", description: "Removes XXXX if it appears at the start of the file name."},
	{command: "--name-transform trimsuffix=XXXX", description: "Removes XXXX if it appears at the end of the file name."},
	{command: "--name-transform regex=/pattern/replacement/", description: "Applies a regex-based transformation."},
	{command: "--name-transform replace=old:new", description: "Replaces occurrences of old with new in the file name."},
	{command: "--name-transform date={YYYYMMDD}", description: "Appends or prefixes the specified date format."},
	{command: "--name-transform truncate=N", description: "Truncates the file name to a maximum of N characters."},
	{command: "--name-transform base64encode", description: "Encodes the file name in Base64."},
	{command: "--name-transform base64decode", description: "Decodes a Base64-encoded file name."},
	{command: "--name-transform encoder=ENCODING", description: "Converts the file name to the specified encoding (e.g., ISO-8859-1, Windows-1252, Macintosh)."},
	{command: "--name-transform decoder=ENCODING", description: "Decodes the file name from the specified encoding."},
	{command: "--name-transform charmap=MAP", description: "Applies a character mapping transformation."},
	{command: "--name-transform lowercase", description: "Converts the file name to lowercase."},
	{command: "--name-transform uppercase", description: "Converts the file name to UPPERCASE."},
	{command: "--name-transform titlecase", description: "Converts the file name to Title Case."},
	{command: "--name-transform ascii", description: "Strips non-ASCII characters."},
	{command: "--name-transform url", description: "URL-encodes the file name."},
	{command: "--name-transform nfc", description: "Converts the file name to NFC Unicode normalization form."},
	{command: "--name-transform nfd", description: "Converts the file name to NFD Unicode normalization form."},
	{command: "--name-transform nfkc", description: "Converts the file name to NFKC Unicode normalization form."},
	{command: "--name-transform nfkd", description: "Converts the file name to NFKD Unicode normalization form."},
	{command: "--name-transform command=/path/to/my/program", description: "Executes an external program to transform file names."},
}

var examples = []example{
	{"stories/The Quick Brown Fox!.txt", []string{"all,uppercase"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,replace=Fox:Turtle", "all,replace=Quick:Slow"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,base64encode"}},
	{"c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", []string{"all,base64decode"}},
	{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfc"}},
	{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfd"}},
	{"stories/The Quick Brown 🦊 Fox!.txt", []string{"all,ascii"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,trimsuffix=.txt"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,prefix=OLD_"}},
	{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,charmap=ISO-8859-7"}},
	{"stories/The Quick Brown Fox: A Memoir [draft].txt", []string{"all,encoder=Colon,SquareBracket"}},
	{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,truncate=21"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,command=echo"}},
	{"stories/The Quick Brown Fox!", []string{"date=-{YYYYMMDD}"}},
	{"stories/The Quick Brown Fox!", []string{"date=-{macfriendlytime}"}},
	{"stories/The Quick Brown Fox!.txt", []string{"all,regex=[\\.\\w]/ab"}},
}

func (e example) command() string {
	s := fmt.Sprintf(`rclone convmv %q`, e.path)
	for _, f := range e.flags {
		s += fmt.Sprintf(" --name-transform %q", f)
	}
	return s
}

func (e example) output() string {
	ctx := context.Background()
	err := SetOptions(ctx, e.flags...)
	if err != nil {
		fs.Errorf(nil, "error generating help text: %v", err)
	}
	return Path(ctx, e.path, false)
}

// go run ./ convmv --help
func sprintExamples() string {
	s := "Examples: \n\n"
	for _, e := range examples {
		s += fmt.Sprintf("```\n%s\n", e.command())
		s += fmt.Sprintf("// Output: %s\n```\n\n", e.output())
	}
	return s
}

func commandTable() string {
	s := `| Command | Description |
|------|------|`
	for _, c := range commandList {
		s += fmt.Sprintf("\n| `%s` | %s |", c.command, c.description)
	}
	s += "\n\n\n"
	return s
}

// SprintList returns the example help text as a string
func SprintList() string {
	var algos transformAlgo
	var charmaps fs.Enum[cmapChoices]
	s := commandTable()
	s += fmt.Sprintln("Conversion modes: \n```")
	for _, v := range algos.Choices() {
		s += fmt.Sprintln(v + " ")
	}
	s += fmt.Sprintln("```")

	s += fmt.Sprintln("Char maps: \n```")
	for _, v := range charmaps.Choices() {
		s += fmt.Sprintln(v + " ")
	}
	s += fmt.Sprintln("```")

	s += fmt.Sprintln("Encoding masks: \n```")
	for _, v := range strings.Split(encoder.ValidStrings(), ",") {
		s += fmt.Sprintln(v + " ")
	}
	s += fmt.Sprintln("```")

	s += sprintExamples()

	return s
}

// PrintList prints the example help text to stdout
func PrintList() {
	fmt.Println(SprintList())
}
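Each `example` above renders as an `rclone convmv` invocation. A standalone sketch (not part of the diff) of the same string building, using one of the example entries from the list:

```go
package main

import "fmt"

// mirrors example.command() above: quote the path, then append one
// --name-transform flag per transformation, in order
func main() {
	path := "stories/The Quick Brown Fox!.txt"
	flags := []string{"all,replace=Fox:Turtle", "all,replace=Quick:Slow"}

	s := fmt.Sprintf("rclone convmv %q", path)
	for _, f := range flags {
		s += fmt.Sprintf(" --name-transform %q", f)
	}
	fmt.Println(s)
	// rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,replace=Fox:Turtle" --name-transform "all,replace=Quick:Slow"
}
```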
248
lib/transform/options.go
Normal file
@@ -0,0 +1,248 @@
package transform

import (
	"context"
	"errors"
	"slices"
	"strings"
	"sync"

	"github.com/rclone/rclone/fs"
)

type transform struct {
	key   transformAlgo // for example, "prefix"
	value string        // for example, "some_prefix_"
	tag   tag           // file, dir, or all
}

// tag controls which part of the file path is affected (file, dir, all)
type tag int

// tag modes
const (
	file tag = iota // Only transform the leaf name of files (default)
	dir             // Only transform name of directories - these may appear anywhere in the path
	all             // Transform the entire path for files and directories
)

// Transforming returns true when transforms are in use
func Transforming(ctx context.Context) bool {
	ci := fs.GetConfig(ctx)
	return len(ci.NameTransform) > 0
}

// SetOptions sets the options in ctx from flags passed in.
// Any existing flags will be overwritten.
// s should be in the same format as cmd line flags, i.e. "all,prefix=XXX"
func SetOptions(ctx context.Context, s ...string) (err error) {
	ci := fs.GetConfig(ctx)
	ci.NameTransform = s
	_, err = getOptions(ctx)
	return err
}

// cache to minimize re-parsing
var (
	cachedNameTransform []string
	cachedOpt           []transform
	cacheLock           sync.Mutex
)

// getOptions sets the options from flags passed in.
func getOptions(ctx context.Context) (opt []transform, err error) {
	if !Transforming(ctx) {
		return opt, nil
	}

	ci := fs.GetConfig(ctx)

	// return cached opt if available
	if cachedNameTransform != nil && slices.Equal(ci.NameTransform, cachedNameTransform) {
		return cachedOpt, nil
	}

	for _, transform := range ci.NameTransform {
		t, err := parse(transform)
		if err != nil {
			return opt, err
		}
		opt = append(opt, t)
	}
	updateCache(ci.NameTransform, opt)
	return opt, nil
}

func updateCache(nt []string, o []transform) {
	cacheLock.Lock()
	cachedNameTransform = slices.Clone(nt)
	cachedOpt = o
	cacheLock.Unlock()
}

// parse a single instance of --name-transform
func parse(s string) (t transform, err error) {
	if s == "" {
		return t, nil
	}
	s = t.parseTag(s)
	err = t.parseKeyVal(s)
	return t, err
}

// parse the tag (file/dir/all), set the option accordingly, and return the trimmed string
//
// we don't worry about errors here because it will error anyway as an invalid key
func (t *transform) parseTag(s string) string {
	if strings.HasPrefix(s, "file,") {
		t.tag = file
		return strings.TrimPrefix(s, "file,")
	}
	if strings.HasPrefix(s, "dir,") {
		t.tag = dir
		return strings.TrimPrefix(s, "dir,")
	}
	if strings.HasPrefix(s, "all,") {
		t.tag = all
		return strings.TrimPrefix(s, "all,")
	}
	return s
}

// parse key and value (if any) by splitting on '=' sign
// (file/dir/all tag has already been trimmed)
func (t *transform) parseKeyVal(s string) (err error) {
	if !strings.ContainsRune(s, '=') {
		err = t.key.Set(s)
		if err != nil {
			return err
		}
		if t.requiresValue() {
			fs.Debugf(nil, "received %v", s)
			return errors.New("value is required for " + t.key.String())
		}
		return nil
	}
	split := strings.Split(s, "=")
	if len(split) != 2 {
		return errors.New("too many values")
	}
	if split[0] == "" {
		return errors.New("key cannot be blank")
	}
	err = t.key.Set(split[0])
	if err != nil {
		return err
	}
	t.value = split[1]
	return nil
}

// returns true if this particular algorithm requires a value
func (t *transform) requiresValue() bool {
	switch t.key {
	case ConvFindReplace:
		return true
	case ConvPrefix:
		return true
	case ConvSuffix:
		return true
	case ConvSuffixKeepExtension:
		return true
	case ConvTrimPrefix:
		return true
	case ConvTrimSuffix:
		return true
	case ConvIndex:
		return true
	case ConvDate:
		return true
	case ConvTruncate:
		return true
	case ConvEncoder:
		return true
	case ConvDecoder:
		return true
	case ConvRegex:
		return true
	case ConvCommand:
		return true
	}
	return false
}

// transformAlgo describes conversion setting
type transformAlgo = fs.Enum[transformChoices]

// Supported transform options
const (
	ConvNone transformAlgo = iota
	ConvToNFC
	ConvToNFD
	ConvToNFKC
	ConvToNFKD
	ConvFindReplace
	ConvPrefix
	ConvSuffix
	ConvSuffixKeepExtension
	ConvTrimPrefix
	ConvTrimSuffix
	ConvIndex
	ConvDate
	ConvTruncate
	ConvBase64Encode
	ConvBase64Decode
	ConvEncoder
	ConvDecoder
	ConvISO8859_1
	ConvWindows1252
	ConvMacintosh
	ConvCharmap
	ConvLowercase
	ConvUppercase
	ConvTitlecase
	ConvASCII
	ConvURL
	ConvRegex
	ConvCommand
)

type transformChoices struct{}

func (transformChoices) Choices() []string {
	return []string{
		ConvNone:                "none",
		ConvToNFC:               "nfc",
		ConvToNFD:               "nfd",
		ConvToNFKC:              "nfkc",
		ConvToNFKD:              "nfkd",
		ConvFindReplace:         "replace",
		ConvPrefix:              "prefix",
		ConvSuffix:              "suffix",
		ConvSuffixKeepExtension: "suffix_keep_extension",
		ConvTrimPrefix:          "trimprefix",
		ConvTrimSuffix:          "trimsuffix",
		ConvIndex:               "index",
		ConvDate:                "date",
		ConvTruncate:            "truncate",
		ConvBase64Encode:        "base64encode",
		ConvBase64Decode:        "base64decode",
		ConvEncoder:             "encoder",
		ConvDecoder:             "decoder",
		ConvISO8859_1:           "ISO-8859-1",
		ConvWindows1252:         "Windows-1252",
		ConvMacintosh:           "Macintosh",
		ConvCharmap:             "charmap",
		ConvLowercase:           "lowercase",
		ConvUppercase:           "uppercase",
		ConvTitlecase:           "titlecase",
		ConvASCII:               "ascii",
		ConvURL:                 "url",
		ConvRegex:               "regex",
		ConvCommand:             "command",
	}
}

func (transformChoices) Type() string {
	return "string"
}
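A sketch (not part of the diff) of setting and validating these options from code; `SetOptions` and `Transforming` are the exported entry points shown above, and the flag strings use the same tag,key=value syntax the parser expects:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/lib/transform"
)

func main() {
	ctx := context.Background()
	// Same syntax as the CLI flag: optional file/dir/all tag, key, value.
	if err := transform.SetOptions(ctx, "all,prefix=OLD_", "file,suffix=_v2"); err != nil {
		fmt.Println("bad --name-transform:", err)
		return
	}
	fmt.Println(transform.Transforming(ctx)) // true

	// A key that requires a value is rejected when given bare:
	fmt.Println(transform.SetOptions(ctx, "prefix")) // value is required for prefix
}
```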
335
lib/transform/transform.go
Normal file
@@ -0,0 +1,335 @@
// Package transform holds functions for path name transformations
package transform

import (
	"bytes"
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"mime"
	"net/url"
	"os/exec"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"
	"unicode/utf8"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/encoder"
	"golang.org/x/text/encoding/charmap"
	"golang.org/x/text/unicode/norm"
)

// Path transforms a path s according to the --name-transform options in use
//
// If no transforms are in use, s is returned unchanged
func Path(ctx context.Context, s string, isDir bool) string {
	if !Transforming(ctx) {
		return s
	}

	old := s
	opt, err := getOptions(ctx)
	if err != nil {
		err = fs.CountError(ctx, err)
		fs.Errorf(s, "Failed to parse transform flags: %v", err)
	}
	for _, t := range opt {
		if isDir && t.tag == file {
			continue
		}
		baseOnly := !isDir && t.tag == file
		if t.tag == dir && !isDir {
			s, err = transformDir(s, t)
		} else {
			s, err = transformPath(s, t, baseOnly)
		}
		if err != nil {
			err = fs.CountError(ctx, err)
			fs.Errorf(s, "Failed to transform: %v", err)
		}
	}
	if old != s {
		fs.Debugf(old, "transformed to: %v", s)
	}
	if strings.Count(old, "/") != strings.Count(s, "/") {
		err = fs.CountError(ctx, fmt.Errorf("number of path segments must match: %v (%v), %v (%v)", old, strings.Count(old, "/"), s, strings.Count(s, "/")))
		fs.Errorf(old, "%v", err)
		return old
	}
	return s
}

// transformPath transforms a path string according to the chosen TransformAlgo.
// Each path segment is transformed separately, to preserve path separators.
// If baseOnly is true, only the base will be transformed (useful for renaming while walking a dir tree recursively),
// for example, "some/nested/path" -> "some/nested/CONVERTEDPATH".
// Otherwise, the entire path is transformed.
func transformPath(s string, t transform, baseOnly bool) (string, error) {
	if s == "" || s == "/" || s == "\\" || s == "." {
		return "", nil
	}

	if baseOnly {
		transformedBase, err := transformPathSegment(path.Base(s), t)
		if err := validateSegment(transformedBase); err != nil {
			return "", err
		}
		return path.Join(path.Dir(s), transformedBase), err
	}

	segments := strings.Split(s, "/")
	transformedSegments := make([]string, 0, len(segments))
	for _, seg := range segments {
		convSeg, err := transformPathSegment(seg, t)
		if err != nil {
			return "", err
		}
		if err := validateSegment(convSeg); err != nil {
			return "", err
		}
		transformedSegments = append(transformedSegments, convSeg)
	}
	return path.Join(transformedSegments...), nil
}

// transform all but the last path segment
func transformDir(s string, t transform) (string, error) {
	dirPath, err := transformPath(path.Dir(s), t, false)
	if err != nil {
		return "", err
	}
	return path.Join(dirPath, path.Base(s)), nil
}

// transformPathSegment transforms one path segment (or really any string) according to the chosen TransformAlgo.
// It assumes path separators have already been trimmed.
func transformPathSegment(s string, t transform) (string, error) {
	switch t.key {
	case ConvNone:
		return s, nil
	case ConvToNFC:
		return norm.NFC.String(s), nil
	case ConvToNFD:
		return norm.NFD.String(s), nil
	case ConvToNFKC:
		return norm.NFKC.String(s), nil
	case ConvToNFKD:
		return norm.NFKD.String(s), nil
	case ConvBase64Encode:
		return base64.URLEncoding.EncodeToString([]byte(s)), nil // URLEncoding to avoid slashes
	case ConvBase64Decode:
		if s == ".DS_Store" {
			return s, nil
		}
		b, err := base64.URLEncoding.DecodeString(s)
		if err != nil {
			fs.Errorf(s, "base64 error")
		}
		return string(b), err
	case ConvFindReplace:
		split := strings.Split(t.value, ":")
		if len(split) != 2 {
			return s, fmt.Errorf("wrong number of values: %v", t.value)
		}
		return strings.ReplaceAll(s, split[0], split[1]), nil
	case ConvPrefix:
		return t.value + s, nil
	case ConvSuffix:
		return s + t.value, nil
	case ConvSuffixKeepExtension:
		return SuffixKeepExtension(s, t.value), nil
	case ConvTrimPrefix:
		return strings.TrimPrefix(s, t.value), nil
	case ConvTrimSuffix:
		return strings.TrimSuffix(s, t.value), nil
	case ConvTruncate:
		max, err := strconv.Atoi(t.value)
		if err != nil {
			return s, err
		}
		if max <= 0 {
			return s, nil
		}
		if utf8.RuneCountInString(s) <= max {
			return s, nil
		}
		runes := []rune(s)
		return string(runes[:max]), nil
	case ConvEncoder:
		var enc encoder.MultiEncoder
		err := enc.Set(t.value)
		if err != nil {
			return s, err
		}
		return enc.Encode(s), nil
	case ConvDecoder:
		var enc encoder.MultiEncoder
		err := enc.Set(t.value)
		if err != nil {
			return s, err
		}
		return enc.Decode(s), nil
	case ConvISO8859_1:
		return encodeWithReplacement(s, charmap.ISO8859_1), nil
	case ConvWindows1252:
		return encodeWithReplacement(s, charmap.Windows1252), nil
	case ConvMacintosh:
		return encodeWithReplacement(s, charmap.Macintosh), nil
	case ConvCharmap:
		var cmapType fs.Enum[cmapChoices]
		err := cmapType.Set(t.value)
		if err != nil {
			return s, err
		}
		c := charmapByID(cmapType)
		return encodeWithReplacement(s, c), nil
	case ConvLowercase:
		return strings.ToLower(s), nil
	case ConvUppercase:
		return strings.ToUpper(s), nil
	case ConvTitlecase:
		return strings.ToTitle(s), nil
	case ConvASCII:
		return toASCII(s), nil
	case ConvURL:
		return url.QueryEscape(s), nil
	case ConvDate:
		return s + AppyTimeGlobs(t.value, time.Now()), nil
	case ConvRegex:
		split := strings.Split(t.value, "/")
		if len(split) != 2 {
			return s, fmt.Errorf("regex syntax error: %v", t.value)
		}
		re := regexp.MustCompile(split[0])
		return re.ReplaceAllString(s, split[1]), nil
	case ConvCommand:
		return mapper(s, t.value)
	default:
		return "", errors.New("this option is not yet implemented")
	}
}

// SuffixKeepExtension adds a suffix while keeping extension
//
// i.e. file.txt becomes file_somesuffix.txt not file.txt_somesuffix
func SuffixKeepExtension(remote string, suffix string) string {
	var (
		base  = remote
		exts  = ""
		first = true
		ext   = path.Ext(remote)
	)
	for ext != "" {
		// Look up second and subsequent extensions in mime types.
		// If they aren't found then don't keep it as an extension.
		if !first && mime.TypeByExtension(ext) == "" {
			break
		}
		base = base[:len(base)-len(ext)]
		exts = ext + exts
		first = false
		ext = path.Ext(base)
	}
	return base + suffix + exts
}

// forbid transformations that add/remove path separators
func validateSegment(s string) error {
	if strings.TrimSpace(s) == "" {
		return errors.New("transform cannot render path segments empty")
	}
	if strings.ContainsRune(s, '/') {
		return fmt.Errorf("transform cannot add path separators: %v", s)
	}
	return nil
}

// ParseGlobs determines whether a string contains {brackets}
// and returns the substring (including both brackets) for replacing
// substring is first opening bracket to last closing bracket --
// good for {{this}} but not {this}{this}
func ParseGlobs(s string) (hasGlobs bool, substring string) {
	open := strings.Index(s, "{")
	close := strings.LastIndex(s, "}")
	if open >= 0 && close > open {
		return true, s[open : close+1]
	}
	return false, ""
}

// TrimBrackets converts {{this}} to this
func TrimBrackets(s string) string {
	return strings.Trim(s, "{}")
}

// TimeFormat converts a user-supplied string to a Go time constant, if possible
func TimeFormat(timeFormat string) string {
	switch timeFormat {
	case "Layout":
		timeFormat = time.Layout
	case "ANSIC":
		timeFormat = time.ANSIC
	case "UnixDate":
		timeFormat = time.UnixDate
	case "RubyDate":
		timeFormat = time.RubyDate
	case "RFC822":
		timeFormat = time.RFC822
	case "RFC822Z":
		timeFormat = time.RFC822Z
	case "RFC850":
		timeFormat = time.RFC850
	case "RFC1123":
		timeFormat = time.RFC1123
	case "RFC1123Z":
		timeFormat = time.RFC1123Z
	case "RFC3339":
		timeFormat = time.RFC3339
	case "RFC3339Nano":
		timeFormat = time.RFC3339Nano
	case "Kitchen":
		timeFormat = time.Kitchen
	case "Stamp":
		timeFormat = time.Stamp
	case "StampMilli":
		timeFormat = time.StampMilli
	case "StampMicro":
		timeFormat = time.StampMicro
	case "StampNano":
		timeFormat = time.StampNano
	case "DateTime":
		timeFormat = time.DateTime
	case "DateOnly":
		timeFormat = time.DateOnly
	case "TimeOnly":
		timeFormat = time.TimeOnly
	case "MacFriendlyTime", "macfriendlytime", "mac":
		timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
	case "YYYYMMDD":
		timeFormat = "20060102"
	}
	return timeFormat
}

// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
func AppyTimeGlobs(s string, t time.Time) string {
	hasGlobs, substring := ParseGlobs(s)
	if !hasGlobs {
		return s
	}
	timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
	return strings.ReplaceAll(s, substring, timeString)
}

func mapper(s string, command string) (string, error) {
	out, err := exec.Command(command, s).CombinedOutput()
	if err != nil {
		out = bytes.TrimSpace(out)
		return s, fmt.Errorf("%s: error running command %q: %v", out, command+" "+s, err)
	}
	return string(bytes.TrimSpace(out)), nil
}
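Putting the pieces together, a minimal sketch (not part of the diff) of applying transforms to a path; the flag strings follow the syntax parsed in options.go, and the expected output assumes each transform is applied per segment, in order:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/lib/transform"
)

func main() {
	ctx := context.Background()
	if err := transform.SetOptions(ctx, "all,uppercase", "all,suffix=_BAK"); err != nil {
		panic(err)
	}
	// isDir=false treats the path as a file; with the "all" tag every
	// segment is transformed, and transforms may not add or remove
	// path separators (Path falls back to the old name if they do).
	fmt.Println(transform.Path(ctx, "docs/report.txt", false))
	// should print: DOCS_BAK/REPORT.TXT_BAK
}
```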
142
lib/transform/transform_test.go
Normal file
@@ -0,0 +1,142 @@
package transform

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// sync tests are in fs/sync/sync_transform_test.go to avoid import cycle issues

func newOptions(s ...string) (context.Context, error) {
	ctx := context.Background()
	err := SetOptions(ctx, s...)
	return ctx, err
}

func TestPath(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"", ""},
		{"toe/toe/toe", "tictactoe/tictactoe/tictactoe"},
		{"a/b/c", "tictaca/tictacb/tictacc"},
	} {
		ctx, err := newOptions("all,prefix=tac", "all,prefix=tic")
		require.NoError(t, err)

		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}

func TestFileTagOnFile(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b/c.txt", "a/b/1c.txt"},
	} {
		ctx, err := newOptions("file,prefix=1")
		require.NoError(t, err)

		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}

func TestDirTagOnFile(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b/c.txt", "1a/1b/c.txt"},
	} {
		ctx, err := newOptions("dir,prefix=1")
		require.NoError(t, err)

		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}

func TestAllTag(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b/c.txt", "1a/1b/1c.txt"},
	} {
		ctx, err := newOptions("all,prefix=1")
		require.NoError(t, err)

		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}

func TestFileTagOnDir(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b", "a/b"},
	} {
		ctx, err := newOptions("file,prefix=1")
		require.NoError(t, err)

		got := Path(ctx, test.path, true)
		assert.Equal(t, test.want, got)
	}
}

func TestDirTagOnDir(t *testing.T) {
	for _, test := range []struct {
		path string
		want string
	}{
		{"a/b", "1a/1b"},
	} {
		ctx, err := newOptions("dir,prefix=1")
		require.NoError(t, err)

		got := Path(ctx, test.path, true)
		assert.Equal(t, test.want, got)
	}
}

func TestVarious(t *testing.T) {
	for _, test := range []struct {
		path  string
		want  string
		flags []string
	}{
		{"stories/The Quick Brown Fox!.txt", "STORIES/THE QUICK BROWN FOX!.TXT", []string{"all,uppercase"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Slow Brown Turtle!.txt", []string{"all,replace=Fox:Turtle", "all,replace=Quick:Slow"}},
		{"stories/The Quick Brown Fox!.txt", "c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", []string{"all,base64encode"}},
		{"c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", "stories/The Quick Brown Fox!.txt", []string{"all,base64decode"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfc"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfd"}},
		{"stories/The Quick Brown 🦊 Fox!.txt", "stories/The Quick Brown  Fox!.txt", []string{"all,ascii"}},
		{"stories/The Quick Brown 🦊 Fox!.txt", "stories/The+Quick+Brown+%F0%9F%A6%8A+Fox%21.txt", []string{"all,url"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!", []string{"all,trimsuffix=.txt"}},
		{"stories/The Quick Brown Fox!.txt", "OLD_stories/OLD_The Quick Brown Fox!.txt", []string{"all,prefix=OLD_"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown _ Fox Went to the Caf_!.txt", []string{"all,charmap=ISO-8859-7"}},
		{"stories/The Quick Brown Fox: A Memoir [draft].txt", "stories/The Quick Brown Fox： A Memoir [draft].txt", []string{"all,encoder=Colon,SquareBracket"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox", []string{"all,truncate=21"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt", []string{"all,command=echo"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt-" + time.Now().Local().Format("20060102"), []string{"date=-{YYYYMMDD}"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt-" + time.Now().Local().Format("2006-01-02 0304PM"), []string{"date=-{macfriendlytime}"}},
		{"stories/The Quick Brown Fox!.txt", "ababababababab/ababab ababababab ababababab ababab!abababab", []string{"all,regex=[\\.\\w]/ab"}},
	} {
		ctx, err := newOptions(test.flags...)
		require.NoError(t, err)

		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}
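A couple of hypothetical extra cases in the same table-free style, covering `SuffixKeepExtension` and `AppyTimeGlobs` from transform.go (the test name and cases are illustrative, not part of the diff; the time is built in the local zone because `AppyTimeGlobs` formats via `t.Local()`):

```go
package transform

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestHelpersSketch(t *testing.T) {
	// SuffixKeepExtension inserts before the extension (or at the end if none)
	assert.Equal(t, "file_v2.txt", SuffixKeepExtension("file.txt", "_v2"))
	assert.Equal(t, "file_v2", SuffixKeepExtension("file", "_v2"))

	// AppyTimeGlobs replaces the {...} glob with the formatted time
	ts := time.Date(2001, 2, 3, 4, 5, 0, 0, time.Local)
	assert.Equal(t, "log-20010203.txt", AppyTimeGlobs("log-{YYYYMMDD}.txt", ts))
}
```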