mirror of
https://github.com/rclone/rclone.git
synced 2026-01-22 12:23:15 +00:00
Compare commits
26 Commits
pr-8538-tr
...
pr-8538-tr
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4b2aa962cd | ||
|
|
19fe519ac8 | ||
|
|
ab7d6e72e1 | ||
|
|
026e97292f | ||
|
|
740a4080f0 | ||
|
|
6e2edbaf18 | ||
|
|
15510c66d4 | ||
|
|
dfa4d94827 | ||
|
|
36b89960e3 | ||
|
|
a3f3fc61ee | ||
|
|
b8fde4fc46 | ||
|
|
c37fe733df | ||
|
|
b31659904f | ||
|
|
ebcf51336e | ||
|
|
a334bba643 | ||
|
|
d4fd93e7f3 | ||
|
|
6644bdba0f | ||
|
|
68a65e878f | ||
|
|
7606ad8294 | ||
|
|
32847e88b4 | ||
|
|
2e879586bd | ||
|
|
9d55b2411f | ||
|
|
fe880c0fac | ||
|
|
b160089be7 | ||
|
|
c2254164f8 | ||
|
|
e57b94c4ac |
@@ -572,3 +572,19 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
|
||||
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
|
||||
|
||||
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
|
||||
|
||||
## Keeping a backend or command out of tree
|
||||
|
||||
Rclone was designed to be modular so it is very easy to keep a backend
|
||||
or a command out of the main rclone source tree.
|
||||
|
||||
So for example if you had a backend which accessed your proprietary
|
||||
systems or a command which was specialised for your needs you could
|
||||
add them out of tree.
|
||||
|
||||
This may be easier than using a plugin and is supported on all
|
||||
platforms not just macOS and Linux.
|
||||
|
||||
This is explained further in https://github.com/rclone/rclone_out_of_tree_example
|
||||
which has an example of an out of tree backend `ram` (which is a
|
||||
renamed version of the `memory` backend).
|
||||
|
||||
@@ -43,6 +43,7 @@ var (
|
||||
errAlbumDelete = errors.New("google photos API does not implement deleting albums")
|
||||
errRemove = errors.New("google photos API only implements removing files from albums")
|
||||
errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created")
|
||||
errReadOnly = errors.New("can't upload files in read only mode")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -52,19 +53,31 @@ const (
|
||||
listChunks = 100 // chunk size to read directory listings
|
||||
albumChunks = 50 // chunk size to read album listings
|
||||
minSleep = 10 * time.Millisecond
|
||||
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
|
||||
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
|
||||
scopeAccess = 2 // position of access scope in list
|
||||
scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
|
||||
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
|
||||
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
|
||||
)
|
||||
|
||||
var (
|
||||
// scopes needed for read write access
|
||||
scopesReadWrite = []string{
|
||||
"openid",
|
||||
"profile",
|
||||
scopeAppendOnly,
|
||||
scopeReadOnly,
|
||||
scopeReadWrite,
|
||||
}
|
||||
|
||||
// scopes needed for read only access
|
||||
scopesReadOnly = []string{
|
||||
"openid",
|
||||
"profile",
|
||||
scopeReadOnly,
|
||||
}
|
||||
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauthutil.Config{
|
||||
Scopes: []string{
|
||||
"openid",
|
||||
"profile",
|
||||
scopeReadWrite, // this must be at position scopeAccess
|
||||
},
|
||||
Scopes: scopesReadWrite,
|
||||
AuthURL: google.Endpoint.AuthURL,
|
||||
TokenURL: google.Endpoint.TokenURL,
|
||||
ClientID: rcloneClientID,
|
||||
@@ -100,9 +113,9 @@ func init() {
|
||||
case "":
|
||||
// Fill in the scopes
|
||||
if opt.ReadOnly {
|
||||
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
|
||||
oauthConfig.Scopes = scopesReadOnly
|
||||
} else {
|
||||
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
|
||||
oauthConfig.Scopes = scopesReadWrite
|
||||
}
|
||||
return oauthutil.ConfigOut("warning", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
@@ -1120,6 +1133,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
if !album.IsWriteable {
|
||||
if o.fs.opt.ReadOnly {
|
||||
return errReadOnly
|
||||
}
|
||||
return errOwnAlbums
|
||||
}
|
||||
|
||||
|
||||
@@ -252,18 +252,14 @@ func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.Op
|
||||
}
|
||||
|
||||
resp, err := d.icloud.srv.Call(ctx, opts)
|
||||
if err != nil {
|
||||
// icloud has some weird http codes
|
||||
if resp.StatusCode == 330 {
|
||||
loc, err := resp.Location()
|
||||
if err == nil {
|
||||
return d.DownloadFile(ctx, loc.String(), opt)
|
||||
}
|
||||
// icloud has some weird http codes
|
||||
if err != nil && resp != nil && resp.StatusCode == 330 {
|
||||
loc, err := resp.Location()
|
||||
if err == nil {
|
||||
return d.DownloadFile(ctx, loc.String(), opt)
|
||||
}
|
||||
|
||||
return resp, err
|
||||
}
|
||||
return d.icloud.srv.Call(ctx, opts)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
|
||||
|
||||
@@ -56,6 +56,7 @@ const (
|
||||
driveTypeSharepoint = "documentLibrary"
|
||||
defaultChunkSize = 10 * fs.Mebi
|
||||
chunkSizeMultiple = 320 * fs.Kibi
|
||||
maxSinglePartSize = 4 * fs.Mebi
|
||||
|
||||
regionGlobal = "global"
|
||||
regionUS = "us"
|
||||
@@ -138,6 +139,21 @@ func init() {
|
||||
Help: "Azure and Office 365 operated by Vnet Group in China",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "upload_cutoff",
|
||||
Help: `Cutoff for switching to chunked upload.
|
||||
|
||||
Any files larger than this will be uploaded in chunks of chunk_size.
|
||||
|
||||
This is disabled by default as uploading using single part uploads
|
||||
causes rclone to use twice the storage on Onedrive business as when
|
||||
rclone sets the modification time after the upload Onedrive creates a
|
||||
new version.
|
||||
|
||||
See: https://github.com/rclone/rclone/issues/1716
|
||||
`,
|
||||
Default: fs.SizeSuffix(-1),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
|
||||
@@ -746,6 +762,7 @@ Examples:
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Region string `config:"region"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
DriveID string `config:"drive_id"`
|
||||
DriveType string `config:"drive_type"`
|
||||
@@ -1022,6 +1039,13 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
|
||||
return
|
||||
}
|
||||
|
||||
func checkUploadCutoff(cs fs.SizeSuffix) error {
|
||||
if cs > maxSinglePartSize {
|
||||
return fmt.Errorf("%v is greater than %v", cs, maxSinglePartSize)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
// Parse config into Options struct
|
||||
@@ -1035,6 +1059,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("onedrive: chunk size: %w", err)
|
||||
}
|
||||
err = checkUploadCutoff(opt.UploadCutoff)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("onedrive: upload cutoff: %w", err)
|
||||
}
|
||||
|
||||
if opt.DriveID == "" || opt.DriveType == "" {
|
||||
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
|
||||
@@ -2469,6 +2497,10 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
|
||||
return false, nil
|
||||
}
|
||||
return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
|
||||
} else if err != nil && resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
fs.Debugf(o, "Received 404 error: assuming eventual consistency problem with session - retrying chunk: %v", err)
|
||||
time.Sleep(5 * time.Second) // a little delay to help things along
|
||||
return true, err
|
||||
}
|
||||
if err != nil {
|
||||
return shouldRetry(ctx, resp, err)
|
||||
@@ -2563,8 +2595,8 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
|
||||
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
|
||||
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
|
||||
size := src.Size()
|
||||
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
|
||||
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
|
||||
if size < 0 || size > int64(maxSinglePartSize) {
|
||||
return nil, fmt.Errorf("size passed into uploadSinglepart must be >= 0 and <= %v", maxSinglePartSize)
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Starting singlepart upload")
|
||||
@@ -2617,9 +2649,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
size := src.Size()
|
||||
|
||||
var info *api.Item
|
||||
if size > 0 {
|
||||
if size > 0 && size >= int64(o.fs.opt.UploadCutoff) {
|
||||
info, err = o.uploadMultipart(ctx, in, src, options...)
|
||||
} else if size == 0 {
|
||||
} else if size >= 0 {
|
||||
info, err = o.uploadSinglepart(ctx, in, src, options...)
|
||||
} else {
|
||||
return errors.New("unknown-sized upload not supported")
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
_ "github.com/rclone/rclone/cmd/cleanup"
|
||||
_ "github.com/rclone/rclone/cmd/cmount"
|
||||
_ "github.com/rclone/rclone/cmd/config"
|
||||
_ "github.com/rclone/rclone/cmd/convmv"
|
||||
_ "github.com/rclone/rclone/cmd/copy"
|
||||
_ "github.com/rclone/rclone/cmd/copyto"
|
||||
_ "github.com/rclone/rclone/cmd/copyurl"
|
||||
|
||||
@@ -5,8 +5,6 @@ import (
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Names comprises a set of file names
|
||||
@@ -85,81 +83,3 @@ func (am AliasMap) Alias(name1 string) string {
|
||||
}
|
||||
return name1
|
||||
}
|
||||
|
||||
// ParseGlobs determines whether a string contains {brackets}
|
||||
// and returns the substring (including both brackets) for replacing
|
||||
// substring is first opening bracket to last closing bracket --
|
||||
// good for {{this}} but not {this}{this}
|
||||
func ParseGlobs(s string) (hasGlobs bool, substring string) {
|
||||
open := strings.Index(s, "{")
|
||||
close := strings.LastIndex(s, "}")
|
||||
if open >= 0 && close > open {
|
||||
return true, s[open : close+1]
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// TrimBrackets converts {{this}} to this
|
||||
func TrimBrackets(s string) string {
|
||||
return strings.Trim(s, "{}")
|
||||
}
|
||||
|
||||
// TimeFormat converts a user-supplied string to a Go time constant, if possible
|
||||
func TimeFormat(timeFormat string) string {
|
||||
switch timeFormat {
|
||||
case "Layout":
|
||||
timeFormat = time.Layout
|
||||
case "ANSIC":
|
||||
timeFormat = time.ANSIC
|
||||
case "UnixDate":
|
||||
timeFormat = time.UnixDate
|
||||
case "RubyDate":
|
||||
timeFormat = time.RubyDate
|
||||
case "RFC822":
|
||||
timeFormat = time.RFC822
|
||||
case "RFC822Z":
|
||||
timeFormat = time.RFC822Z
|
||||
case "RFC850":
|
||||
timeFormat = time.RFC850
|
||||
case "RFC1123":
|
||||
timeFormat = time.RFC1123
|
||||
case "RFC1123Z":
|
||||
timeFormat = time.RFC1123Z
|
||||
case "RFC3339":
|
||||
timeFormat = time.RFC3339
|
||||
case "RFC3339Nano":
|
||||
timeFormat = time.RFC3339Nano
|
||||
case "Kitchen":
|
||||
timeFormat = time.Kitchen
|
||||
case "Stamp":
|
||||
timeFormat = time.Stamp
|
||||
case "StampMilli":
|
||||
timeFormat = time.StampMilli
|
||||
case "StampMicro":
|
||||
timeFormat = time.StampMicro
|
||||
case "StampNano":
|
||||
timeFormat = time.StampNano
|
||||
case "DateTime":
|
||||
// timeFormat = time.DateTime // missing in go1.19
|
||||
timeFormat = "2006-01-02 15:04:05"
|
||||
case "DateOnly":
|
||||
// timeFormat = time.DateOnly // missing in go1.19
|
||||
timeFormat = "2006-01-02"
|
||||
case "TimeOnly":
|
||||
// timeFormat = time.TimeOnly // missing in go1.19
|
||||
timeFormat = "15:04:05"
|
||||
case "MacFriendlyTime", "macfriendlytime", "mac":
|
||||
timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
|
||||
}
|
||||
return timeFormat
|
||||
}
|
||||
|
||||
// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
|
||||
func AppyTimeGlobs(s string, t time.Time) string {
|
||||
hasGlobs, substring := ParseGlobs(s)
|
||||
if !hasGlobs {
|
||||
return s
|
||||
}
|
||||
timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
|
||||
return strings.ReplaceAll(s, substring, timeString)
|
||||
}
|
||||
|
||||
@@ -3,20 +3,22 @@ package bilib
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"log/slog"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
)
|
||||
|
||||
// CaptureOutput runs a function capturing its output.
|
||||
// CaptureOutput runs a function capturing its output at log level INFO.
|
||||
func CaptureOutput(fun func()) []byte {
|
||||
logSave := log.Writer()
|
||||
logrusSave := logrus.StandardLogger().Out
|
||||
buf := &bytes.Buffer{}
|
||||
log.SetOutput(buf)
|
||||
logrus.SetOutput(buf)
|
||||
oldLevel := log.Handler.SetLevel(slog.LevelInfo)
|
||||
log.Handler.SetOutput(func(level slog.Level, text string) {
|
||||
buf.WriteString(text)
|
||||
})
|
||||
defer func() {
|
||||
log.Handler.ResetOutput()
|
||||
log.Handler.SetLevel(oldLevel)
|
||||
}()
|
||||
fun()
|
||||
log.SetOutput(logSave)
|
||||
logrus.SetOutput(logrusSave)
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
@@ -4,8 +4,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"mime"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -13,6 +11,7 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
)
|
||||
|
||||
// Prefer describes strategies for resolving sync conflicts
|
||||
@@ -97,8 +96,8 @@ func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
|
||||
}
|
||||
// replace glob variables, if any
|
||||
t := time.Now() // capture static time here so it is the same for all files throughout this run
|
||||
b.opt.ConflictSuffix1 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
|
||||
b.opt.ConflictSuffix2 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
|
||||
b.opt.ConflictSuffix1 = transform.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
|
||||
b.opt.ConflictSuffix2 = transform.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
|
||||
|
||||
// append dot (intentionally allow more than one)
|
||||
b.opt.ConflictSuffix1 = "." + b.opt.ConflictSuffix1
|
||||
@@ -130,6 +129,7 @@ type (
|
||||
path2 namePair
|
||||
}
|
||||
)
|
||||
|
||||
type namePair struct {
|
||||
oldName string
|
||||
newName string
|
||||
@@ -240,24 +240,7 @@ func SuffixName(ctx context.Context, remote, suffix string) string {
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
if ci.SuffixKeepExtension {
|
||||
var (
|
||||
base = remote
|
||||
exts = ""
|
||||
first = true
|
||||
ext = path.Ext(remote)
|
||||
)
|
||||
for ext != "" {
|
||||
// Look second and subsequent extensions in mime types.
|
||||
// If they aren't found then don't keep it as an extension.
|
||||
if !first && mime.TypeByExtension(ext) == "" {
|
||||
break
|
||||
}
|
||||
base = base[:len(base)-len(ext)]
|
||||
exts = ext + exts
|
||||
first = false
|
||||
ext = path.Ext(base)
|
||||
}
|
||||
return base + suffix + exts
|
||||
return transform.SuffixKeepExtension(remote, suffix)
|
||||
}
|
||||
return remote + suffix
|
||||
}
|
||||
|
||||
108
cmd/convmv/convmv.go
Normal file
108
cmd/convmv/convmv.go
Normal file
@@ -0,0 +1,108 @@
|
||||
// Package convmv provides the convmv command.
|
||||
package convmv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
deleteEmptySrcDirs = false
|
||||
createEmptySrcDirs = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move", "")
|
||||
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move", "")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "convmv dest:path --name-transform XXX",
|
||||
Short: `Convert file and directory names in place.`,
|
||||
// Warning¡ "¡" will be replaced by backticks below
|
||||
Long: strings.ReplaceAll(`
|
||||
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
|
||||
|
||||
`+transform.SprintList()+`
|
||||
|
||||
Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
|
||||
|
||||
The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.
|
||||
|
||||
## Files vs Directories ##
|
||||
|
||||
By default ¡--name-transform¡ will only apply to file names. The means only the leaf file name will be transformed.
|
||||
However some of the transforms would be better applied to the whole path or just directories.
|
||||
To choose which which part of the file path is affected some tags can be added to the ¡--name-transform¡
|
||||
|
||||
| Tag | Effect |
|
||||
|------|------|
|
||||
| ¡file¡ | Only transform the leaf name of files (DEFAULT) |
|
||||
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
|
||||
| ¡all¡ | Transform the entire path for files and directories |
|
||||
|
||||
This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
|
||||
|
||||
For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡
|
||||
|
||||
Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.
|
||||
|
||||
## Ordering and Conflicts ##
|
||||
|
||||
* Transformations will be applied in the order specified by the user.
|
||||
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
|
||||
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed
|
||||
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed
|
||||
* Each transformation will be run one path segment at a time.
|
||||
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
|
||||
* It is up to the user to put the transformations in a sensible order.
|
||||
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
|
||||
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
|
||||
user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
|
||||
* Users should be aware that certain combinations may lead to unexpected results and should verify
|
||||
transformations using ¡--dry-run¡ before execution.
|
||||
|
||||
## Race Conditions and Non-Deterministic Behavior ##
|
||||
|
||||
Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
|
||||
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
|
||||
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
|
||||
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
|
||||
|
||||
* To minimize risks, users should:
|
||||
* Carefully review transformations that may introduce conflicts.
|
||||
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
|
||||
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
|
||||
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
|
||||
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.
|
||||
|
||||
`, "¡", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.70",
|
||||
"groups": "Filter,Listing,Important,Copy",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fdst, srcFileName := cmd.NewFsFile(args[0])
|
||||
cmd.Run(false, true, command, func() error {
|
||||
if !transform.Transforming(context.Background()) {
|
||||
return errors.New("--name-transform must be set")
|
||||
}
|
||||
if srcFileName == "" {
|
||||
return sync.Transform(context.Background(), fdst, deleteEmptySrcDirs, createEmptySrcDirs)
|
||||
}
|
||||
return operations.TransformFile(context.Background(), fdst, srcFileName)
|
||||
})
|
||||
},
|
||||
}
|
||||
253
cmd/convmv/convmv_test.go
Normal file
253
cmd/convmv/convmv_test.go
Normal file
@@ -0,0 +1,253 @@
|
||||
// Package convmv provides the convmv command.
|
||||
package convmv
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/all" // import all backends
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// Some times used in the tests
|
||||
var (
|
||||
t1 = fstest.Time("2001-02-03T04:05:06.499999999Z")
|
||||
debug = ``
|
||||
)
|
||||
|
||||
// TestMain drives the tests
|
||||
func TestMain(m *testing.M) {
|
||||
fstest.TestMain(m)
|
||||
}
|
||||
|
||||
func TestTransform(t *testing.T) {
|
||||
type args struct {
|
||||
TransformOpt []string
|
||||
TransformBackOpt []string
|
||||
Lossless bool // whether the TransformBackAlgo is always losslessly invertible
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
}{
|
||||
{name: "NFC", args: args{
|
||||
TransformOpt: []string{"all,nfc"},
|
||||
TransformBackOpt: []string{"all,nfd"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "NFD", args: args{
|
||||
TransformOpt: []string{"all,nfd"},
|
||||
TransformBackOpt: []string{"all,nfc"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "base64", args: args{
|
||||
TransformOpt: []string{"all,base64encode"},
|
||||
TransformBackOpt: []string{"all,base64encode"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "prefix", args: args{
|
||||
TransformOpt: []string{"all,prefix=PREFIX"},
|
||||
TransformBackOpt: []string{"all,trimprefix=PREFIX"},
|
||||
Lossless: true,
|
||||
}},
|
||||
{name: "suffix", args: args{
|
||||
TransformOpt: []string{"all,suffix=SUFFIX"},
|
||||
TransformBackOpt: []string{"all,trimsuffix=SUFFIX"},
|
||||
Lossless: true,
|
||||
}},
|
||||
{name: "truncate", args: args{
|
||||
TransformOpt: []string{"all,truncate=10"},
|
||||
TransformBackOpt: []string{"all,truncate=10"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "encoder", args: args{
|
||||
TransformOpt: []string{"all,encoder=Colon,SquareBracket"},
|
||||
TransformBackOpt: []string{"all,decoder=Colon,SquareBracket"},
|
||||
Lossless: true,
|
||||
}},
|
||||
{name: "ISO-8859-1", args: args{
|
||||
TransformOpt: []string{"all,ISO-8859-1"},
|
||||
TransformBackOpt: []string{"all,ISO-8859-1"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "charmap", args: args{
|
||||
TransformOpt: []string{"all,charmap=ISO-8859-7"},
|
||||
TransformBackOpt: []string{"all,charmap=ISO-8859-7"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "lowercase", args: args{
|
||||
TransformOpt: []string{"all,lowercase"},
|
||||
TransformBackOpt: []string{"all,lowercase"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "ascii", args: args{
|
||||
TransformOpt: []string{"all,ascii"},
|
||||
TransformBackOpt: []string{"all,ascii"},
|
||||
Lossless: false,
|
||||
}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
ctx := context.Background()
|
||||
r.Mkdir(ctx, r.Flocal)
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
items := makeTestFiles(t, r, "dir1")
|
||||
err := r.Fremote.Mkdir(ctx, "empty/empty")
|
||||
require.NoError(t, err)
|
||||
err = r.Flocal.Mkdir(ctx, "empty/empty")
|
||||
require.NoError(t, err)
|
||||
deleteDSStore(t, r)
|
||||
r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
|
||||
r.CheckLocalListing(t, items, []string{"dir1", "empty", "empty/empty"})
|
||||
|
||||
err = transform.SetOptions(ctx, tt.args.TransformOpt...)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = sync.Transform(ctx, r.Fremote, true, true)
|
||||
assert.NoError(t, err)
|
||||
compareNames(ctx, t, r, items)
|
||||
|
||||
transformedItems := transformItems(ctx, t, items)
|
||||
r.CheckRemoteListing(t, transformedItems, []string{transform.Path(ctx, "dir1", true), transform.Path(ctx, "empty", true), transform.Path(ctx, "empty/empty", true)})
|
||||
err = transform.SetOptions(ctx, tt.args.TransformBackOpt...)
|
||||
require.NoError(t, err)
|
||||
err = sync.Transform(ctx, r.Fremote, true, true)
|
||||
assert.NoError(t, err)
|
||||
compareNames(ctx, t, r, transformedItems)
|
||||
|
||||
if tt.args.Lossless {
|
||||
deleteDSStore(t, r)
|
||||
r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሀሠበዠጠᎠᏀᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠀⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
|
||||
const alphabet = "abcdefg123456789"
|
||||
|
||||
var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"}
|
||||
|
||||
func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
|
||||
t.Helper()
|
||||
n := 0
|
||||
// Create test files
|
||||
items := []fstest.Item{}
|
||||
for _, c := range alphabet {
|
||||
var out strings.Builder
|
||||
for i := rune(0); i < 7; i++ {
|
||||
out.WriteRune(c + i)
|
||||
}
|
||||
fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
|
||||
fileName = strings.ToValidUTF8(fileName, "")
|
||||
fileName = strings.NewReplacer(":", "", "<", "", ">", "", "?", "").Replace(fileName) // remove characters illegal on windows
|
||||
|
||||
if debug != "" {
|
||||
fileName = debug
|
||||
}
|
||||
|
||||
item := r.WriteObject(context.Background(), fileName, fileName, t1)
|
||||
r.WriteFile(fileName, fileName, t1)
|
||||
items = append(items, item)
|
||||
n++
|
||||
|
||||
if debug != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, extra := range extras {
|
||||
item := r.WriteObject(context.Background(), extra, extra, t1)
|
||||
r.WriteFile(extra, extra, t1)
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
func deleteDSStore(t *testing.T, r *fstest.Run) {
|
||||
ctxDSStore, fi := filter.AddConfig(context.Background())
|
||||
err := fi.AddRule(`+ *.DS_Store`)
|
||||
assert.NoError(t, err)
|
||||
err = fi.AddRule(`- **`)
|
||||
assert.NoError(t, err)
|
||||
err = operations.Delete(ctxDSStore, r.Fremote)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fstest.Item) {
|
||||
var entries fs.DirEntries
|
||||
|
||||
deleteDSStore(t, r)
|
||||
err := walk.ListR(context.Background(), r.Fremote, "", true, -1, walk.ListObjects, func(e fs.DirEntries) error {
|
||||
entries = append(entries, e...)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
entries = slices.DeleteFunc(entries, func(E fs.DirEntry) bool { // remove those pesky .DS_Store files
|
||||
if strings.Contains(E.Remote(), ".DS_Store") {
|
||||
err := operations.DeleteFile(context.Background(), E.(fs.Object))
|
||||
assert.NoError(t, err)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
require.Equal(t, len(items), entries.Len())
|
||||
|
||||
// sort by CONVERTED name
|
||||
slices.SortStableFunc(items, func(a, b fstest.Item) int {
|
||||
aConv := transform.Path(ctx, a.Path, false)
|
||||
bConv := transform.Path(ctx, b.Path, false)
|
||||
return cmp.Compare(aConv, bConv)
|
||||
})
|
||||
slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
|
||||
return cmp.Compare(a.Remote(), b.Remote())
|
||||
})
|
||||
|
||||
for i, e := range entries {
|
||||
expect := transform.Path(ctx, items[i].Path, false)
|
||||
msg := fmt.Sprintf("expected %v, got %v", detectEncoding(expect), detectEncoding(e.Remote()))
|
||||
assert.Equal(t, expect, e.Remote(), msg)
|
||||
}
|
||||
}
|
||||
|
||||
func transformItems(ctx context.Context, t *testing.T, items []fstest.Item) []fstest.Item {
|
||||
transformedItems := []fstest.Item{}
|
||||
for _, item := range items {
|
||||
newPath := transform.Path(ctx, item.Path, false)
|
||||
newItem := item
|
||||
newItem.Path = newPath
|
||||
transformedItems = append(transformedItems, newItem)
|
||||
}
|
||||
return transformedItems
|
||||
}
|
||||
|
||||
func detectEncoding(s string) string {
|
||||
if norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
|
||||
return "BOTH"
|
||||
}
|
||||
if !norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
|
||||
return "NFD"
|
||||
}
|
||||
if norm.NFC.IsNormalString(s) && !norm.NFD.IsNormalString(s) {
|
||||
return "NFC"
|
||||
}
|
||||
return "OTHER"
|
||||
}
|
||||
@@ -191,7 +191,6 @@ func setupRootCommand(rootCmd *cobra.Command) {
|
||||
})
|
||||
|
||||
cobra.OnInitialize(initConfig)
|
||||
|
||||
}
|
||||
|
||||
// Traverse the tree of commands running fn on each
|
||||
|
||||
@@ -6,6 +6,8 @@ package ncdu
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"sort"
|
||||
@@ -925,23 +927,19 @@ func (u *UI) Run() error {
|
||||
return fmt.Errorf("screen init: %w", err)
|
||||
}
|
||||
|
||||
// Hijack fs.LogOutput so that it doesn't corrupt the screen.
|
||||
if logOutput := fs.LogOutput; !log.Redirected() {
|
||||
type log struct {
|
||||
text string
|
||||
level fs.LogLevel
|
||||
}
|
||||
var logs []log
|
||||
fs.LogOutput = func(level fs.LogLevel, text string) {
|
||||
// Hijack log output so that it doesn't corrupt the screen.
|
||||
if !log.Redirected() {
|
||||
var logs []string
|
||||
log.Handler.SetOutput(func(level slog.Level, text string) {
|
||||
if len(logs) > 100 {
|
||||
logs = logs[len(logs)-100:]
|
||||
}
|
||||
logs = append(logs, log{level: level, text: text})
|
||||
}
|
||||
logs = append(logs, text)
|
||||
})
|
||||
defer func() {
|
||||
fs.LogOutput = logOutput
|
||||
for i := range logs {
|
||||
logOutput(logs[i].level, logs[i].text)
|
||||
log.Handler.ResetOutput()
|
||||
for _, text := range logs {
|
||||
_, _ = os.Stderr.WriteString(text)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -5,11 +5,11 @@ package cmd
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
@@ -19,8 +19,6 @@ import (
|
||||
const (
|
||||
// interval between progress prints
|
||||
defaultProgressInterval = 500 * time.Millisecond
|
||||
// time format for logging
|
||||
logTimeFormat = "2006/01/02 15:04:05"
|
||||
)
|
||||
|
||||
// startProgress starts the progress bar printing
|
||||
@@ -28,15 +26,13 @@ const (
|
||||
// It returns a func which should be called to stop the stats.
|
||||
func startProgress() func() {
|
||||
stopStats := make(chan struct{})
|
||||
oldLogOutput := fs.LogOutput
|
||||
oldSyncPrint := operations.SyncPrintf
|
||||
|
||||
if !log.Redirected() {
|
||||
// Intercept the log calls if not logging to file or syslog
|
||||
fs.LogOutput = func(level fs.LogLevel, text string) {
|
||||
printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
|
||||
|
||||
}
|
||||
log.Handler.SetOutput(func(level slog.Level, text string) {
|
||||
printProgress(text)
|
||||
})
|
||||
}
|
||||
|
||||
// Intercept output from functions such as HashLister to stdout
|
||||
@@ -60,7 +56,10 @@ func startProgress() func() {
|
||||
case <-stopStats:
|
||||
ticker.Stop()
|
||||
printProgress("")
|
||||
fs.LogOutput = oldLogOutput
|
||||
if !log.Redirected() {
|
||||
// Reset intercept of the log calls
|
||||
log.Handler.ResetOutput()
|
||||
}
|
||||
operations.SyncPrintf = oldSyncPrint
|
||||
fmt.Println("")
|
||||
return
|
||||
|
||||
@@ -28,7 +28,8 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
|
||||
|
||||
if entry.IsDir() {
|
||||
if addPrefix {
|
||||
response.AddPrefix(objectPath)
|
||||
prefixWithTrailingSlash := objectPath + "/"
|
||||
response.AddPrefix(prefixWithTrailingSlash)
|
||||
continue
|
||||
}
|
||||
err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
|
||||
|
||||
@@ -48,7 +48,7 @@ func TestEnvironmentVariables(t *testing.T) {
|
||||
env = "RCLONE_LOG_LEVEL=DEBUG"
|
||||
out, err = rcloneEnv(env, "version", "--quiet")
|
||||
if assert.Error(t, err) {
|
||||
assert.Contains(t, out, " DEBUG : ")
|
||||
assert.Contains(t, out, " DEBUG ")
|
||||
assert.Contains(t, out, "Can't set -q and --log-level")
|
||||
assert.Contains(t, "exit status 1", err.Error())
|
||||
}
|
||||
@@ -329,7 +329,7 @@ func TestEnvironmentVariables(t *testing.T) {
|
||||
jsonLogOK := func() {
|
||||
t.Helper()
|
||||
if assert.NoError(t, err) {
|
||||
assert.Contains(t, out, `{"level":"debug",`)
|
||||
assert.Contains(t, out, `"level":"debug"`)
|
||||
assert.Contains(t, out, `"msg":"Version `)
|
||||
assert.Contains(t, out, `"}`)
|
||||
}
|
||||
|
||||
@@ -967,3 +967,9 @@ put them back in again.` >}}
|
||||
* Christian Richter <crichter@owncloud.com> <1058116+dragonchaser@users.noreply.github.com>
|
||||
* Ralf Haferkamp <r.haferkamp@opencloud.eu>
|
||||
* Jugal Kishore <me@devjugal.com>
|
||||
* Tho Neyugn <nguyentruongtho@users.noreply.github.com>
|
||||
* Ben Boeckel <mathstuf@users.noreply.github.com>
|
||||
* Clément Wehrung <cwehrung@nurves.com>
|
||||
* Jeff Geerling <geerlingguy@mac.com>
|
||||
* Germán Casares <german.casares.march+github@gmail.com>
|
||||
* fhuber <florian.huber@noris.de>
|
||||
|
||||
@@ -5,6 +5,14 @@ description: "Rclone Changelog"
|
||||
|
||||
# Changelog
|
||||
|
||||
## v1.69.3 - 2025-05-21
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.69.2...v1.69.3)
|
||||
|
||||
* Bug Fixes
|
||||
* build: Reapply update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
|
||||
* build: Update github.com/ebitengine/purego to work around bug in go1.24.3 (Nick Craig-Wood)
|
||||
|
||||
## v1.69.2 - 2025-05-01
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.69.1...v1.69.2)
|
||||
|
||||
@@ -968,8 +968,9 @@ on any OS, and the value is defined as following:
|
||||
- On Unix: `$HOME` if defined, else by looking up current user in OS-specific user database
|
||||
(e.g. passwd file), or else use the result from shell command `cd && pwd`.
|
||||
|
||||
If you run `rclone config file` you will see where the default
|
||||
location is for you.
|
||||
If you run `rclone config file` you will see where the default location is for
|
||||
you. Running `rclone config touch` will ensure a configuration file exists,
|
||||
creating an empty one in the default location if there is none.
|
||||
|
||||
The fact that an existing file `rclone.conf` in the same directory
|
||||
as the rclone executable is always preferred, means that it is easy
|
||||
@@ -980,7 +981,13 @@ same directory.
|
||||
If the location is set to empty string `""` or path to a file
|
||||
with name `notfound`, or the os null device represented by value `NUL` on
|
||||
Windows and `/dev/null` on Unix systems, then rclone will keep the
|
||||
config file in memory only.
|
||||
configuration file in memory only.
|
||||
|
||||
You may see a log message "Config file not found - using defaults" if there is
|
||||
no configuration file. This can be supressed, e.g. if you are using rclone
|
||||
entirely with [on the fly remotes](/docs/#backend-path-to-dir), by using
|
||||
memory-only configuration file or by creating an empty configuration file, as
|
||||
described above.
|
||||
|
||||
The file format is basic [INI](https://en.wikipedia.org/wiki/INI_file#Format):
|
||||
Sections of text, led by a `[section]` header and followed by
|
||||
@@ -1476,12 +1483,21 @@ have a signal to rotate logs.
|
||||
|
||||
### --log-format LIST ###
|
||||
|
||||
Comma separated list of log format options. Accepted options are `date`,
|
||||
`time`, `microseconds`, `pid`, `longfile`, `shortfile`, `UTC`. Any other
|
||||
keywords will be silently ignored. `pid` will tag log messages with process
|
||||
identifier which useful with `rclone mount --daemon`. Other accepted
|
||||
options are explained in the [go documentation](https://pkg.go.dev/log#pkg-constants).
|
||||
The default log format is "`date`,`time`".
|
||||
Comma separated list of log format options. The accepted options are:
|
||||
|
||||
- `date` - Add a date in the format YYYY/MM/YY to the log.
|
||||
- `time` - Add a time to the log in format HH:MM:SS.
|
||||
- `microseconds` - Add microseconds to the time in format HH:MM:SS.SSSSSS.
|
||||
- `UTC` - Make the logs in UTC not localtime.
|
||||
- `longfile` - Adds the source file and line number of the log statement.
|
||||
- `shortfile` - Adds the source file and line number of the log statement.
|
||||
- `pid` - Add the process ID to the log - useful with `rclone mount --daemon`.
|
||||
- `nolevel` - Don't add the level to the log.
|
||||
- `json` - Equivalent to adding `--use-json-log`
|
||||
|
||||
They are added to the log line in the order above.
|
||||
|
||||
The default log format is `"date,time"`.
|
||||
|
||||
### --log-level LEVEL ###
|
||||
|
||||
@@ -1499,10 +1515,90 @@ warnings and significant events.
|
||||
|
||||
`ERROR` is equivalent to `-q`. It only outputs error messages.
|
||||
|
||||
### --windows-event-log LEVEL ###
|
||||
|
||||
If this is configured (the default is `OFF`) then logs of this level
|
||||
and above will be logged to the Windows event log in **addition** to
|
||||
the normal logs. These will be logged in JSON format as described
|
||||
below regardless of what format the main logs are configured for.
|
||||
|
||||
The Windows event log only has 3 levels of severity `Info`, `Warning`
|
||||
and `Error`. If enabled we map rclone levels like this.
|
||||
|
||||
- `Error` ← `ERROR` (and above)
|
||||
- `Warning` ← `WARNING` (note that this level is defined but not currently used).
|
||||
- `Info` ← `NOTICE`, `INFO` and `DEBUG`.
|
||||
|
||||
Rclone will declare its log source as "rclone" if it is has enough
|
||||
permissions to create the registry key needed. If not then logs will
|
||||
appear as "Application". You can run `rclone version --windows-event-log DEBUG`
|
||||
once as administrator to create the registry key in advance.
|
||||
|
||||
**Note** that the `--windows-event-log` level must be greater (more
|
||||
severe) than or equal to the `--log-level`. For example to log DEBUG
|
||||
to a log file but ERRORs to the event log you would use
|
||||
|
||||
--log-file rclone.log --log-level DEBUG --windows-event-log ERROR
|
||||
|
||||
This option is only supported Windows platforms.
|
||||
|
||||
### --use-json-log ###
|
||||
|
||||
This switches the log format to JSON for rclone. The fields of json log
|
||||
are level, msg, source, time.
|
||||
This switches the log format to JSON for rclone. The fields of JSON
|
||||
log are `level`, `msg`, `source`, `time`. The JSON logs will be
|
||||
printed on a single line, but are shown expanded here for clarity.
|
||||
|
||||
```json
|
||||
{
|
||||
"time": "2025-05-13T17:30:51.036237518+01:00",
|
||||
"level": "debug",
|
||||
"msg": "4 go routines active\n",
|
||||
"source": "cmd/cmd.go:298"
|
||||
}
|
||||
```
|
||||
|
||||
Completed data transfer logs will have extra `size` information. Logs
|
||||
which are about a particular object will have `object` and
|
||||
`objectType` fields also.
|
||||
|
||||
```json
|
||||
{
|
||||
"time": "2025-05-13T17:38:05.540846352+01:00",
|
||||
"level": "info",
|
||||
"msg": "Copied (new) to: file2.txt",
|
||||
"size": 6,
|
||||
"object": "file.txt",
|
||||
"objectType": "*local.Object",
|
||||
"source": "operations/copy.go:368"
|
||||
}
|
||||
```
|
||||
|
||||
Stats logs will contain a `stats` field which is the same as
|
||||
returned from the rc call [core/stats](/rc/#core-stats).
|
||||
|
||||
```json
|
||||
{
|
||||
"time": "2025-05-13T17:38:05.540912847+01:00",
|
||||
"level": "info",
|
||||
"msg": "...text version of the stats...",
|
||||
"stats": {
|
||||
"bytes": 6,
|
||||
"checks": 0,
|
||||
"deletedDirs": 0,
|
||||
"deletes": 0,
|
||||
"elapsedTime": 0.000904825,
|
||||
...truncated for clarity...
|
||||
"totalBytes": 6,
|
||||
"totalChecks": 0,
|
||||
"totalTransfers": 1,
|
||||
"transferTime": 0.000882794,
|
||||
"transfers": 1
|
||||
},
|
||||
"source": "accounting/stats.go:569"
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
### --low-level-retries NUMBER ###
|
||||
|
||||
@@ -1557,6 +1653,32 @@ Setting `--max-buffer-memory` allows the buffer memory to be
|
||||
controlled so that it doesn't overwhelm the machine and allows
|
||||
`--transfers` to be set large.
|
||||
|
||||
### --max-connections=N ###
|
||||
|
||||
This sets the maximum number of concurrent calls to the backend API.
|
||||
It may not map 1:1 to TCP or HTTP connections depending on the backend
|
||||
in use and the use of HTTP1 vs HTTP2.
|
||||
|
||||
When downloading files, backends only limit the initial opening of the
|
||||
stream. The bulk data download is not counted as a connection. This
|
||||
means that the `--max--connections` flag won't limit the total number
|
||||
of downloads.
|
||||
|
||||
Note that it is possible to cause deadlocks with this setting so it
|
||||
should be used with care.
|
||||
|
||||
If you are doing a sync or copy then make sure `--max-connections` is
|
||||
one more than the sum of `--transfers` and `--checkers`.
|
||||
|
||||
If you use `--check-first` then `--max-connections` just needs to be
|
||||
one more than the maximum of `--checkers` and `--transfers`.
|
||||
|
||||
So for `--max-connections 3` you'd use `--checkers 2 --transfers 2
|
||||
--check-first` or `--checkers 1 --transfers 1`.
|
||||
|
||||
Setting this flag can be useful for backends which do multipart
|
||||
uploads to limit the number of simultaneous parts being transferred.
|
||||
|
||||
### --max-delete=N ###
|
||||
|
||||
This tells rclone not to delete more than N files. If that limit is
|
||||
@@ -1848,6 +1970,13 @@ If the backend has a `--backend-upload-concurrency` setting (eg
|
||||
number of transfers instead if it is larger than the value of
|
||||
`--multi-thread-streams` or `--multi-thread-streams` isn't set.
|
||||
|
||||
### --name-transform COMMAND[=XXXX] ###
|
||||
`--name-transform` introduces path name transformations for
|
||||
`rclone copy`, `rclone sync`, and `rclone move`. These transformations
|
||||
enable modifications to source and destination file names by applying
|
||||
prefixes, suffixes, and other alterations during transfer operations.
|
||||
For detailed docs and examples, see [`convmv`](/commands/rclone_convmv/).
|
||||
|
||||
### --no-check-dest ###
|
||||
|
||||
The `--no-check-dest` can be used with `move` or `copy` and it causes
|
||||
|
||||
@@ -22,6 +22,20 @@ See the [remote setup docs](/remote_setup/) for more info.
|
||||
|
||||
This has now been documented in its own [remote setup page](/remote_setup/).
|
||||
|
||||
### How can I get rid of the "Config file not found" notice?
|
||||
|
||||
If you see a notice like 'NOTICE: Config file "rclone.conf" not found', this
|
||||
means you have not configured any remotes.
|
||||
|
||||
If you need to configure a remote, see the [config help docs](/docs/#configure).
|
||||
|
||||
If you are using rclone entirely with [on the fly remotes](/docs/#backend-path-to-dir),
|
||||
you can create an empty config file to get rid of this notice, for example:
|
||||
|
||||
```
|
||||
rclone config touch
|
||||
```
|
||||
|
||||
### Can rclone sync directly from drive to s3 ###
|
||||
|
||||
Rclone can sync between two remote cloud storage systems just fine.
|
||||
|
||||
@@ -14,6 +14,11 @@ Google Photos.
|
||||
limitations, so please read the [limitations section](#limitations)
|
||||
carefully to make sure it is suitable for your use.
|
||||
|
||||
**NB** From March 31, 2025 rclone can only download photos it
|
||||
uploaded. This limitation is due to policy changes at Google. You may
|
||||
need to run `rclone config reconnect remote:` to make rclone work
|
||||
again after upgrading to rclone v1.70.
|
||||
|
||||
## Configuration
|
||||
|
||||
The initial setup for google cloud storage involves getting a token from Google Photos
|
||||
@@ -528,6 +533,11 @@ videos or images or formats that Google Photos doesn't understand,
|
||||
rclone will upload the file, then Google Photos will give an error
|
||||
when it is put turned into a media item.
|
||||
|
||||
**NB** From March 31, 2025 rclone can only download photos it
|
||||
uploaded. This limitation is due to policy changes at Google. You may
|
||||
need to run `rclone config reconnect remote:` to make rclone work
|
||||
again after upgrading to rclone v1.70.
|
||||
|
||||
Note that all media items uploaded to Google Photos through the API
|
||||
are stored in full resolution at "original quality" and **will** count
|
||||
towards your storage quota in your Google Account. The API does
|
||||
|
||||
@@ -5551,7 +5551,7 @@ source).
|
||||
|
||||
This has the following consequences:
|
||||
|
||||
- Using `rclone rcat` will fail as the medatada doesn't match after upload
|
||||
- Using `rclone rcat` will fail as the metadata doesn't match after upload
|
||||
- Uploading files with `rclone mount` will fail for the same reason
|
||||
- This can worked around by using `--vfs-cache-mode writes` or `--vfs-cache-mode full` or setting `--s3-upload-cutoff` large
|
||||
- Files uploaded via a multipart upload won't have their modtimes
|
||||
|
||||
52
fs/config.go
52
fs/config.go
@@ -545,31 +545,16 @@ var ConfigOptionsInfo = Options{{
|
||||
Help: "Add partial-suffix to temporary file name when --inplace is not used",
|
||||
Groups: "Copy",
|
||||
}, {
|
||||
Name: "max_connections",
|
||||
Help: strings.ReplaceAll(`Maximum number of simultaneous connections, 0 for unlimited.
|
||||
|
||||
This sets the maximum number of connections made to the backend on a
|
||||
per backend basis. Connections in this case are calls to the backend
|
||||
API and may not map 1:1 to TCP or HTTP connections depending on the
|
||||
backend in use.
|
||||
|
||||
Note that it is possible to cause deadlocks with this setting so it
|
||||
should be used with care.
|
||||
|
||||
If you are doing a sync or copy then make sure |--max-connections| is
|
||||
one more than the sum of |--transfers| and |--checkers|.
|
||||
|
||||
If you use |--check-first| then |--max-connections| just needs to be
|
||||
one more than the maximum of |--checkers| and |--transfers|.
|
||||
|
||||
So for |--max-connections 3| you'd use |--checkers 2 --transfers 2
|
||||
--check-first| or |--checkers 1 --transfers 1|.
|
||||
|
||||
Setting this flag can be useful for backends which do multipart
|
||||
uploads or downloads to limit the number of total connections.
|
||||
`, "|", "`"),
|
||||
Name: "max_connections",
|
||||
Help: "Maximum number of simultaneous backend API connections, 0 for unlimited.",
|
||||
Default: 0,
|
||||
Advanced: true,
|
||||
Groups: "Networking",
|
||||
}, {
|
||||
Name: "name_transform",
|
||||
Default: []string{},
|
||||
Help: "Transform paths during the copy process.",
|
||||
Groups: "Copy",
|
||||
}}
|
||||
|
||||
// ConfigInfo is filesystem config options
|
||||
@@ -681,6 +666,7 @@ type ConfigInfo struct {
|
||||
PartialSuffix string `config:"partial_suffix"`
|
||||
MetadataMapper SpaceSepList `config:"metadata_mapper"`
|
||||
MaxConnections int `config:"max_connections"`
|
||||
NameTransform []string `config:"name_transform"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -691,9 +677,13 @@ func init() {
|
||||
RegisterGlobalOptions(OptionsInfo{Name: "main", Opt: globalConfig, Options: ConfigOptionsInfo, Reload: globalConfig.Reload})
|
||||
|
||||
// initial guess at log level from the flags
|
||||
globalConfig.LogLevel = initialLogLevel()
|
||||
globalConfig.LogLevel = InitialLogLevel()
|
||||
}
|
||||
|
||||
// LogReload is written by fs/log to set variables which should really
|
||||
// be there but we can't move due to them being visible here in the rc.
|
||||
var LogReload = func(*ConfigInfo) error { return nil }
|
||||
|
||||
// Reload assumes the config has been edited and does what is necessary to make it live
|
||||
func (ci *ConfigInfo) Reload(ctx context.Context) error {
|
||||
// Set -vv if --dump is in use
|
||||
@@ -707,11 +697,6 @@ func (ci *ConfigInfo) Reload(ctx context.Context) error {
|
||||
ci.StatsLogLevel = LogLevelNotice
|
||||
}
|
||||
|
||||
// If --use-json-log then start the JSON logger
|
||||
if ci.UseJSONLog {
|
||||
InstallJSONLogger(ci.LogLevel)
|
||||
}
|
||||
|
||||
// Check --compare-dest and --copy-dest
|
||||
if len(ci.CompareDest) > 0 && len(ci.CopyDest) > 0 {
|
||||
return fmt.Errorf("can't use --compare-dest with --copy-dest")
|
||||
@@ -751,13 +736,12 @@ func (ci *ConfigInfo) Reload(ctx context.Context) error {
|
||||
nonZero(&ci.Transfers)
|
||||
nonZero(&ci.Checkers)
|
||||
|
||||
return nil
|
||||
return LogReload(ci)
|
||||
}
|
||||
|
||||
// Initial logging level
|
||||
//
|
||||
// Perform a simple check for debug flags to enable debug logging during the flag initialization
|
||||
func initialLogLevel() LogLevel {
|
||||
// InitialLogLevel performs a simple check for debug flags to enable
|
||||
// debug logging during the flag initialization.
|
||||
func InitialLogLevel() LogLevel {
|
||||
logLevel := LogLevelNotice
|
||||
for argIndex, arg := range os.Args {
|
||||
if strings.HasPrefix(arg, "-vv") && strings.TrimRight(arg, "v") == "-" {
|
||||
|
||||
126
fs/log.go
126
fs/log.go
@@ -4,10 +4,9 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"log/slog"
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"slices"
|
||||
)
|
||||
|
||||
// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
|
||||
@@ -33,6 +32,7 @@ const (
|
||||
LogLevelNotice // Normal logging, -q suppresses
|
||||
LogLevelInfo // Transfers, needs -v
|
||||
LogLevelDebug // Debug level, needs -vv
|
||||
LogLevelOff
|
||||
)
|
||||
|
||||
type logLevelChoices struct{}
|
||||
@@ -47,6 +47,7 @@ func (logLevelChoices) Choices() []string {
|
||||
LogLevelNotice: "NOTICE",
|
||||
LogLevelInfo: "INFO",
|
||||
LogLevelDebug: "DEBUG",
|
||||
LogLevelOff: "OFF",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,19 +55,33 @@ func (logLevelChoices) Type() string {
|
||||
return "LogLevel"
|
||||
}
|
||||
|
||||
// LogPrintPid enables process pid in log
|
||||
var LogPrintPid = false
|
||||
// slogLevel definitions defined as slog.Level constants.
|
||||
// The integer values determine severity for filtering.
|
||||
// Lower values are less severe (e.g., Debug), higher values are more severe (e.g., Emergency).
|
||||
// We fit our extra values into slog's scale.
|
||||
const (
|
||||
// slog.LevelDebug slog.Level = -4
|
||||
// slog.LevelInfo slog.Level = 0
|
||||
SlogLevelNotice = slog.Level(2) // Between Info (0) and Warn (4)
|
||||
// slog.LevelWarn slog.Level = 4
|
||||
// slog.LevelError slog.Level = 8
|
||||
SlogLevelCritical = slog.Level(12) // More severe than Error
|
||||
SlogLevelAlert = slog.Level(16) // More severe than Critical
|
||||
SlogLevelEmergency = slog.Level(20) // Most severe
|
||||
SlogLevelOff = slog.Level(24) // A very high value
|
||||
)
|
||||
|
||||
// InstallJSONLogger is a hook that --use-json-log calls
|
||||
var InstallJSONLogger = func(logLevel LogLevel) {}
|
||||
|
||||
// LogOutput sends the text to the logger of level
|
||||
var LogOutput = func(level LogLevel, text string) {
|
||||
text = fmt.Sprintf("%-6s: %s", level, text)
|
||||
if LogPrintPid {
|
||||
text = fmt.Sprintf("[%d] %s", os.Getpid(), text)
|
||||
}
|
||||
_ = log.Output(4, text)
|
||||
// Map our level numbers to slog level numbers
|
||||
var levelToSlog = []slog.Level{
|
||||
LogLevelEmergency: SlogLevelEmergency,
|
||||
LogLevelAlert: SlogLevelAlert,
|
||||
LogLevelCritical: SlogLevelCritical,
|
||||
LogLevelError: slog.LevelError,
|
||||
LogLevelWarning: slog.LevelWarn,
|
||||
LogLevelNotice: SlogLevelNotice,
|
||||
LogLevelInfo: slog.LevelInfo,
|
||||
LogLevelDebug: slog.LevelDebug,
|
||||
LogLevelOff: SlogLevelOff,
|
||||
}
|
||||
|
||||
// LogValueItem describes keyed item for a JSON log entry
|
||||
@@ -108,76 +123,45 @@ func (j LogValueItem) String() string {
|
||||
return fmt.Sprint(j.value)
|
||||
}
|
||||
|
||||
func logLogrus(level LogLevel, text string, fields logrus.Fields) {
|
||||
switch level {
|
||||
case LogLevelDebug:
|
||||
logrus.WithFields(fields).Debug(text)
|
||||
case LogLevelInfo:
|
||||
logrus.WithFields(fields).Info(text)
|
||||
case LogLevelNotice, LogLevelWarning:
|
||||
logrus.WithFields(fields).Warn(text)
|
||||
case LogLevelError:
|
||||
logrus.WithFields(fields).Error(text)
|
||||
case LogLevelCritical:
|
||||
logrus.WithFields(fields).Fatal(text)
|
||||
case LogLevelEmergency, LogLevelAlert:
|
||||
logrus.WithFields(fields).Panic(text)
|
||||
// LogLevelToSlog converts an rclone log level to log/slog log level.
|
||||
func LogLevelToSlog(level LogLevel) slog.Level {
|
||||
slogLevel := slog.LevelError
|
||||
// NB level is unsigned so we don't check < 0 here
|
||||
if int(level) < len(levelToSlog) {
|
||||
slogLevel = levelToSlog[level]
|
||||
}
|
||||
return slogLevel
|
||||
}
|
||||
|
||||
func logLogrusWithObject(level LogLevel, o any, text string, fields logrus.Fields) {
|
||||
func logSlog(level LogLevel, text string, attrs []any) {
|
||||
slog.Log(context.Background(), LogLevelToSlog(level), text, attrs...)
|
||||
}
|
||||
|
||||
func logSlogWithObject(level LogLevel, o any, text string, attrs []any) {
|
||||
if o != nil {
|
||||
if fields == nil {
|
||||
fields = logrus.Fields{}
|
||||
}
|
||||
fields["object"] = fmt.Sprintf("%+v", o)
|
||||
fields["objectType"] = fmt.Sprintf("%T", o)
|
||||
attrs = slices.Concat(attrs, []any{
|
||||
"object", fmt.Sprintf("%+v", o),
|
||||
"objectType", fmt.Sprintf("%T", o),
|
||||
})
|
||||
}
|
||||
logLogrus(level, text, fields)
|
||||
}
|
||||
|
||||
func logJSON(level LogLevel, o any, text string) {
|
||||
logLogrusWithObject(level, o, text, nil)
|
||||
}
|
||||
|
||||
func logJSONf(level LogLevel, o any, text string, args ...any) {
|
||||
text = fmt.Sprintf(text, args...)
|
||||
fields := logrus.Fields{}
|
||||
for _, arg := range args {
|
||||
if item, ok := arg.(LogValueItem); ok {
|
||||
fields[item.key] = item.value
|
||||
}
|
||||
}
|
||||
logLogrusWithObject(level, o, text, fields)
|
||||
}
|
||||
|
||||
func logPlain(level LogLevel, o any, text string) {
|
||||
if o != nil {
|
||||
text = fmt.Sprintf("%v: %s", o, text)
|
||||
}
|
||||
LogOutput(level, text)
|
||||
}
|
||||
|
||||
func logPlainf(level LogLevel, o any, text string, args ...any) {
|
||||
logPlain(level, o, fmt.Sprintf(text, args...))
|
||||
logSlog(level, text, attrs)
|
||||
}
|
||||
|
||||
// LogPrint produces a log string from the arguments passed in
|
||||
func LogPrint(level LogLevel, o any, text string) {
|
||||
if GetConfig(context.TODO()).UseJSONLog {
|
||||
logJSON(level, o, text)
|
||||
} else {
|
||||
logPlain(level, o, text)
|
||||
}
|
||||
logSlogWithObject(level, o, text, nil)
|
||||
}
|
||||
|
||||
// LogPrintf produces a log string from the arguments passed in
|
||||
func LogPrintf(level LogLevel, o any, text string, args ...any) {
|
||||
if GetConfig(context.TODO()).UseJSONLog {
|
||||
logJSONf(level, o, text, args...)
|
||||
} else {
|
||||
logPlainf(level, o, text, args...)
|
||||
text = fmt.Sprintf(text, args...)
|
||||
var fields []any
|
||||
for _, arg := range args {
|
||||
if item, ok := arg.(LogValueItem); ok {
|
||||
fields = append(fields, item.key, item.value)
|
||||
}
|
||||
}
|
||||
logSlogWithObject(level, o, text, fields)
|
||||
}
|
||||
|
||||
// LogLevelPrint writes logs at the given level
|
||||
|
||||
@@ -1,106 +0,0 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var loggerInstalled = false
|
||||
|
||||
// InstallJSONLogger installs the JSON logger at the specified log level
|
||||
func InstallJSONLogger(logLevel fs.LogLevel) {
|
||||
if !loggerInstalled {
|
||||
logrus.AddHook(NewCallerHook())
|
||||
loggerInstalled = true
|
||||
}
|
||||
logrus.SetFormatter(&logrus.JSONFormatter{
|
||||
TimestampFormat: "2006-01-02T15:04:05.999999-07:00",
|
||||
})
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
switch logLevel {
|
||||
case fs.LogLevelEmergency, fs.LogLevelAlert:
|
||||
logrus.SetLevel(logrus.PanicLevel)
|
||||
case fs.LogLevelCritical:
|
||||
logrus.SetLevel(logrus.FatalLevel)
|
||||
case fs.LogLevelError:
|
||||
logrus.SetLevel(logrus.ErrorLevel)
|
||||
case fs.LogLevelWarning, fs.LogLevelNotice:
|
||||
logrus.SetLevel(logrus.WarnLevel)
|
||||
case fs.LogLevelInfo:
|
||||
logrus.SetLevel(logrus.InfoLevel)
|
||||
case fs.LogLevelDebug:
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
}
|
||||
|
||||
// install hook in fs to call to avoid circular dependency
|
||||
func init() {
|
||||
fs.InstallJSONLogger = InstallJSONLogger
|
||||
}
|
||||
|
||||
// CallerHook for log the calling file and line of the fine
|
||||
type CallerHook struct {
|
||||
Field string
|
||||
Skip int
|
||||
levels []logrus.Level
|
||||
}
|
||||
|
||||
// NewCallerHook use to make a hook
|
||||
func NewCallerHook(levels ...logrus.Level) logrus.Hook {
|
||||
hook := CallerHook{
|
||||
Field: "source",
|
||||
Skip: 7,
|
||||
levels: levels,
|
||||
}
|
||||
if len(hook.levels) == 0 {
|
||||
hook.levels = logrus.AllLevels
|
||||
}
|
||||
return &hook
|
||||
}
|
||||
|
||||
// Levels implement applied hook to which levels
|
||||
func (h *CallerHook) Levels() []logrus.Level {
|
||||
return logrus.AllLevels
|
||||
}
|
||||
|
||||
// Fire logs the information of context (filename and line)
|
||||
func (h *CallerHook) Fire(entry *logrus.Entry) error {
|
||||
entry.Data[h.Field] = findCaller(h.Skip)
|
||||
return nil
|
||||
}
|
||||
|
||||
// findCaller ignores the caller relevant to logrus or fslog then find out the exact caller
|
||||
func findCaller(skip int) string {
|
||||
file := ""
|
||||
line := 0
|
||||
for i := range 10 {
|
||||
file, line = getCaller(skip + i)
|
||||
if !strings.HasPrefix(file, "logrus") && !strings.Contains(file, "log.go") {
|
||||
break
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%s:%d", file, line)
|
||||
}
|
||||
|
||||
func getCaller(skip int) (string, int) {
|
||||
_, file, line, ok := runtime.Caller(skip)
|
||||
// fmt.Println(file,":",line)
|
||||
if !ok {
|
||||
return "", 0
|
||||
}
|
||||
n := 0
|
||||
for i := len(file) - 1; i > 0; i-- {
|
||||
if file[i] == '/' {
|
||||
n++
|
||||
if n >= 2 {
|
||||
file = file[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return file, line
|
||||
}
|
||||
15
fs/log/event_log.go
Normal file
15
fs/log/event_log.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Windows event logging stubs for non windows machines
|
||||
|
||||
//go:build !windows
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Starts windows event log if configured.
|
||||
func startWindowsEventLog(*OutputHandler) error {
|
||||
return fmt.Errorf("windows event log not supported on %s platform", runtime.GOOS)
|
||||
}
|
||||
79
fs/log/event_log_windows.go
Normal file
79
fs/log/event_log_windows.go
Normal file
@@ -0,0 +1,79 @@
|
||||
// Windows event logging
|
||||
|
||||
//go:build windows
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"golang.org/x/sys/windows"
|
||||
"golang.org/x/sys/windows/svc/eventlog"
|
||||
)
|
||||
|
||||
const (
|
||||
errorID = uint32(windows.ERROR_INTERNAL_ERROR)
|
||||
infoID = uint32(windows.ERROR_SUCCESS)
|
||||
sourceName = "rclone"
|
||||
)
|
||||
|
||||
var (
|
||||
windowsEventLog *eventlog.Log
|
||||
)
|
||||
|
||||
func startWindowsEventLog(handler *OutputHandler) error {
|
||||
// Don't install Windows event log if it is disabled.
|
||||
if Opt.WindowsEventLogLevel == fs.LogLevelOff {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Install the event source - we don't care if this fails as Windows has sensible fallbacks.
|
||||
_ = eventlog.InstallAsEventCreate(sourceName, eventlog.Info|eventlog.Warning|eventlog.Error)
|
||||
|
||||
// Open the event log
|
||||
// If sourceName didn't get registered then Windows will use "Application" instead which is fine.
|
||||
// Though in my tests it seemsed to use sourceName regardless.
|
||||
elog, err := eventlog.Open(sourceName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("open event log: %w", err)
|
||||
}
|
||||
|
||||
// Set the global for the handler
|
||||
windowsEventLog = elog
|
||||
|
||||
// Close it on exit
|
||||
atexit.Register(func() {
|
||||
err := elog.Close()
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to close Windows event log: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Add additional JSON logging to the eventLog handler.
|
||||
handler.AddOutput(true, eventLog)
|
||||
|
||||
fs.Infof(nil, "Logging to Windows event log at level %v", Opt.WindowsEventLogLevel)
|
||||
return nil
|
||||
}
|
||||
|
||||
// We use levels ERROR, NOTICE, INFO, DEBUG
|
||||
// Need to map to ERROR, WARNING, INFO
|
||||
func eventLog(level slog.Level, text string) {
|
||||
// Check to see if this level is required
|
||||
if level < fs.LogLevelToSlog(Opt.WindowsEventLogLevel) {
|
||||
return
|
||||
}
|
||||
|
||||
// Now log to windows eventLog
|
||||
switch level {
|
||||
case fs.SlogLevelEmergency, fs.SlogLevelAlert, fs.SlogLevelCritical, slog.LevelError:
|
||||
_ = windowsEventLog.Error(errorID, text)
|
||||
case slog.LevelWarn:
|
||||
_ = windowsEventLog.Warning(infoID, text)
|
||||
case fs.SlogLevelNotice, slog.LevelInfo, slog.LevelDebug:
|
||||
_ = windowsEventLog.Info(infoID, text)
|
||||
}
|
||||
}
|
||||
128
fs/log/log.go
128
fs/log/log.go
@@ -3,15 +3,14 @@ package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// OptionsInfo descripts the Options in use
|
||||
@@ -22,7 +21,7 @@ var OptionsInfo = fs.Options{{
|
||||
Groups: "Logging",
|
||||
}, {
|
||||
Name: "log_format",
|
||||
Default: "date,time",
|
||||
Default: logFormatDate | logFormatTime,
|
||||
Help: "Comma separated list of log format options",
|
||||
Groups: "Logging",
|
||||
}, {
|
||||
@@ -40,15 +39,27 @@ var OptionsInfo = fs.Options{{
|
||||
Default: false,
|
||||
Help: "Activate systemd integration for the logger",
|
||||
Groups: "Logging",
|
||||
}, {
|
||||
Name: "windows_event_log_level",
|
||||
Default: fs.LogLevelOff,
|
||||
Help: "Windows Event Log level DEBUG|INFO|NOTICE|ERROR|OFF",
|
||||
Groups: "Logging",
|
||||
Hide: func() fs.OptionVisibility {
|
||||
if runtime.GOOS == "windows" {
|
||||
return 0
|
||||
}
|
||||
return fs.OptionHideBoth
|
||||
}(),
|
||||
}}
|
||||
|
||||
// Options contains options for controlling the logging
|
||||
type Options struct {
|
||||
File string `config:"log_file"` // Log everything to this file
|
||||
Format string `config:"log_format"` // Comma separated list of log format options
|
||||
UseSyslog bool `config:"syslog"` // Use Syslog for logging
|
||||
SyslogFacility string `config:"syslog_facility"` // Facility for syslog, e.g. KERN,USER,...
|
||||
LogSystemdSupport bool `config:"log_systemd"` // set if using systemd logging
|
||||
File string `config:"log_file"` // Log everything to this file
|
||||
Format logFormat `config:"log_format"` // Comma separated list of log format options
|
||||
UseSyslog bool `config:"syslog"` // Use Syslog for logging
|
||||
SyslogFacility string `config:"syslog_facility"` // Facility for syslog, e.g. KERN,USER,...
|
||||
LogSystemdSupport bool `config:"log_systemd"` // set if using systemd logging
|
||||
WindowsEventLogLevel fs.LogLevel `config:"windows_event_log_level"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -58,6 +69,37 @@ func init() {
|
||||
// Opt is the options for the logger
|
||||
var Opt Options
|
||||
|
||||
// enum for the log format
|
||||
type logFormat = fs.Bits[logFormatChoices]
|
||||
|
||||
const (
|
||||
logFormatDate logFormat = 1 << iota
|
||||
logFormatTime
|
||||
logFormatMicroseconds
|
||||
logFormatUTC
|
||||
logFormatLongFile
|
||||
logFormatShortFile
|
||||
logFormatPid
|
||||
logFormatNoLevel
|
||||
logFormatJSON
|
||||
)
|
||||
|
||||
type logFormatChoices struct{}
|
||||
|
||||
func (logFormatChoices) Choices() []fs.BitsChoicesInfo {
|
||||
return []fs.BitsChoicesInfo{
|
||||
{Bit: uint64(logFormatDate), Name: "date"},
|
||||
{Bit: uint64(logFormatTime), Name: "time"},
|
||||
{Bit: uint64(logFormatMicroseconds), Name: "microseconds"},
|
||||
{Bit: uint64(logFormatUTC), Name: "UTC"},
|
||||
{Bit: uint64(logFormatLongFile), Name: "longfile"},
|
||||
{Bit: uint64(logFormatShortFile), Name: "shortfile"},
|
||||
{Bit: uint64(logFormatPid), Name: "pid"},
|
||||
{Bit: uint64(logFormatNoLevel), Name: "nolevel"},
|
||||
{Bit: uint64(logFormatJSON), Name: "json"},
|
||||
}
|
||||
}
|
||||
|
||||
// fnName returns the name of the calling +2 function
|
||||
func fnName() string {
|
||||
pc, _, _, ok := runtime.Caller(2)
|
||||
@@ -114,31 +156,29 @@ func Stack(o any, info string) {
|
||||
fs.LogPrintf(fs.LogLevelDebug, o, "%s\nStack trace:\n%s", info, buf)
|
||||
}
|
||||
|
||||
// This is called from fs when the config is reloaded
|
||||
//
|
||||
// The config should really be here but we can't move it as it is
|
||||
// externally visible in the rc.
|
||||
func logReload(ci *fs.ConfigInfo) error {
|
||||
Handler.SetLevel(fs.LogLevelToSlog(ci.LogLevel))
|
||||
|
||||
if Opt.WindowsEventLogLevel != fs.LogLevelOff && Opt.WindowsEventLogLevel > ci.LogLevel {
|
||||
return fmt.Errorf("--windows-event-log-level %q must be >= --log-level %q", Opt.WindowsEventLogLevel, ci.LogLevel)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
fs.LogReload = logReload
|
||||
}
|
||||
|
||||
// InitLogging start the logging as per the command line flags
|
||||
func InitLogging() {
|
||||
flagsStr := "," + Opt.Format + ","
|
||||
var flags int
|
||||
if strings.Contains(flagsStr, ",date,") {
|
||||
flags |= log.Ldate
|
||||
}
|
||||
if strings.Contains(flagsStr, ",time,") {
|
||||
flags |= log.Ltime
|
||||
}
|
||||
if strings.Contains(flagsStr, ",microseconds,") {
|
||||
flags |= log.Lmicroseconds
|
||||
}
|
||||
if strings.Contains(flagsStr, ",UTC,") {
|
||||
flags |= log.LUTC
|
||||
}
|
||||
if strings.Contains(flagsStr, ",longfile,") {
|
||||
flags |= log.Llongfile
|
||||
}
|
||||
if strings.Contains(flagsStr, ",shortfile,") {
|
||||
flags |= log.Lshortfile
|
||||
}
|
||||
log.SetFlags(flags)
|
||||
|
||||
fs.LogPrintPid = strings.Contains(flagsStr, ",pid,")
|
||||
// Note that ci only has the defaults in at this point
|
||||
// We set real values in logReload
|
||||
ci := fs.GetConfig(context.Background())
|
||||
|
||||
// Log file output
|
||||
if Opt.File != "" {
|
||||
@@ -150,17 +190,27 @@ func InitLogging() {
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "Failed to seek log file to end: %v", err)
|
||||
}
|
||||
log.SetOutput(f)
|
||||
logrus.SetOutput(f)
|
||||
redirectStderr(f)
|
||||
Handler.setWriter(f)
|
||||
}
|
||||
|
||||
// --use-json-log implies JSON formatting
|
||||
if ci.UseJSONLog {
|
||||
Opt.Format |= logFormatJSON
|
||||
}
|
||||
|
||||
// Set slog level to initial log level
|
||||
Handler.SetLevel(fs.LogLevelToSlog(fs.InitialLogLevel()))
|
||||
|
||||
// Set the format to the configured format
|
||||
Handler.setFormat(Opt.Format)
|
||||
|
||||
// Syslog output
|
||||
if Opt.UseSyslog {
|
||||
if Opt.File != "" {
|
||||
fs.Fatalf(nil, "Can't use --syslog and --log-file together")
|
||||
}
|
||||
startSysLog()
|
||||
startSysLog(Handler)
|
||||
}
|
||||
|
||||
// Activate systemd logger support if systemd invocation ID is
|
||||
@@ -173,7 +223,15 @@ func InitLogging() {
|
||||
|
||||
// Systemd logging output
|
||||
if Opt.LogSystemdSupport {
|
||||
startSystemdLog()
|
||||
startSystemdLog(Handler)
|
||||
}
|
||||
|
||||
// Windows event logging
|
||||
if Opt.WindowsEventLogLevel != fs.LogLevelOff {
|
||||
err := startWindowsEventLog(Handler)
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "Failed to start windows event log: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
391
fs/log/slog.go
Normal file
391
fs/log/slog.go
Normal file
@@ -0,0 +1,391 @@
|
||||
// Interfaces for the slog package
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Handler is the standard handler for the logging.
|
||||
var Handler = defaultHandler()
|
||||
|
||||
// Create the default OutputHandler
|
||||
//
|
||||
// This logs to stderr with standard go logger format at level INFO.
|
||||
//
|
||||
// This will be adjusted by InitLogging to be the configured levels
|
||||
// but it is important we have a logger running regardless of whether
|
||||
// InitLogging has been called yet or not.
|
||||
func defaultHandler() *OutputHandler {
|
||||
// Default options for default handler
|
||||
var opts = &slog.HandlerOptions{
|
||||
Level: fs.LogLevelToSlog(fs.InitialLogLevel()),
|
||||
}
|
||||
|
||||
// Create our handler
|
||||
h := NewOutputHandler(os.Stderr, opts, logFormatDate|logFormatTime)
|
||||
|
||||
// Set the slog default handler
|
||||
slog.SetDefault(slog.New(h))
|
||||
|
||||
// Make log.Printf logs at level Notice
|
||||
slog.SetLogLoggerLevel(fs.SlogLevelNotice)
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
// Map slog level names to string
|
||||
var slogNames = map[slog.Level]string{
|
||||
slog.LevelDebug: "DEBUG",
|
||||
slog.LevelInfo: "INFO",
|
||||
fs.SlogLevelNotice: "NOTICE",
|
||||
slog.LevelWarn: "WARNING",
|
||||
slog.LevelError: "ERROR",
|
||||
fs.SlogLevelCritical: "CRITICAL",
|
||||
fs.SlogLevelAlert: "ALERT",
|
||||
fs.SlogLevelEmergency: "EMERGENCY",
|
||||
}
|
||||
|
||||
// Convert a slog level to string using rclone's extra levels
|
||||
func slogLevelToString(level slog.Level) string {
|
||||
levelStr := slogNames[level]
|
||||
if levelStr == "" {
|
||||
levelStr = level.String()
|
||||
}
|
||||
return levelStr
|
||||
}
|
||||
|
||||
// ReplaceAttr function to customize the level key's string value in logs
|
||||
func mapLogLevelNames(groups []string, a slog.Attr) slog.Attr {
|
||||
if a.Key == slog.LevelKey {
|
||||
level, ok := a.Value.Any().(slog.Level)
|
||||
if !ok {
|
||||
return a
|
||||
}
|
||||
levelStr := strings.ToLower(slogLevelToString(level))
|
||||
a.Value = slog.StringValue(levelStr)
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// get the file and line number of the caller skipping skip levels
|
||||
func getCaller(skip int) string {
|
||||
var pc [64]uintptr
|
||||
n := runtime.Callers(skip, pc[:])
|
||||
if n == 0 {
|
||||
return ""
|
||||
}
|
||||
frames := runtime.CallersFrames(pc[:n])
|
||||
var more = true
|
||||
var frame runtime.Frame
|
||||
for more {
|
||||
frame, more = frames.Next()
|
||||
|
||||
file := frame.File
|
||||
if strings.Contains(file, "/log/") || strings.HasSuffix(file, "log.go") {
|
||||
continue
|
||||
}
|
||||
line := frame.Line
|
||||
|
||||
// shorten file name
|
||||
n := 0
|
||||
for i := len(file) - 1; i > 0; i-- {
|
||||
if file[i] == '/' {
|
||||
n++
|
||||
if n >= 2 {
|
||||
file = file[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%s:%d", file, line)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// OutputHandler is a slog.Handler that writes log records in a format
|
||||
// identical to the standard library's `log` package (e.g., "YYYY/MM/DD HH:MM:SS message").
|
||||
//
|
||||
// It can also write logs in JSON format identical to logrus.
|
||||
type OutputHandler struct {
|
||||
opts slog.HandlerOptions
|
||||
levelVar slog.LevelVar
|
||||
writer io.Writer
|
||||
mu sync.Mutex
|
||||
output []outputFn // log to writer if empty or the last item
|
||||
outputExtra []outputExtra // log to all these additional places
|
||||
format logFormat
|
||||
jsonBuf bytes.Buffer
|
||||
jsonHandler *slog.JSONHandler
|
||||
}
|
||||
|
||||
// Records the type and function pointer for extra logging output.
|
||||
type outputExtra struct {
|
||||
json bool
|
||||
output outputFn
|
||||
}
|
||||
|
||||
// Define the type of the override logger
|
||||
type outputFn func(level slog.Level, text string)
|
||||
|
||||
// NewOutputHandler creates a new OutputHandler with the specified flags.
|
||||
//
|
||||
// This is designed to use log/slog but produce output which is
|
||||
// backwards compatible with previous rclone versions.
|
||||
//
|
||||
// If opts is nil, default options are used, with Level set to
|
||||
// slog.LevelInfo.
|
||||
func NewOutputHandler(out io.Writer, opts *slog.HandlerOptions, format logFormat) *OutputHandler {
|
||||
h := &OutputHandler{
|
||||
writer: out,
|
||||
format: format,
|
||||
}
|
||||
if opts != nil {
|
||||
h.opts = *opts
|
||||
}
|
||||
if h.opts.Level == nil {
|
||||
h.opts.Level = slog.LevelInfo
|
||||
}
|
||||
// Set the level var with the configured level
|
||||
h.levelVar.Set(h.opts.Level.Level())
|
||||
// And use it from now on
|
||||
h.opts.Level = &h.levelVar
|
||||
|
||||
// Create the JSON logger in case we need it
|
||||
jsonOpts := slog.HandlerOptions{
|
||||
Level: h.opts.Level,
|
||||
ReplaceAttr: mapLogLevelNames,
|
||||
}
|
||||
h.jsonHandler = slog.NewJSONHandler(&h.jsonBuf, &jsonOpts)
|
||||
return h
|
||||
}
|
||||
|
||||
// SetOutput sets a new output handler for the log output.
|
||||
//
|
||||
// This is for temporarily overriding the output.
|
||||
func (h *OutputHandler) SetOutput(fn outputFn) {
|
||||
h.output = append(h.output, fn)
|
||||
}
|
||||
|
||||
// ResetOutput resets the log output to what is was.
|
||||
func (h *OutputHandler) ResetOutput() {
|
||||
if len(h.output) > 0 {
|
||||
h.output = h.output[:len(h.output)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// AddOutput adds an additional logging destination of the type specified.
|
||||
func (h *OutputHandler) AddOutput(json bool, fn outputFn) {
|
||||
h.outputExtra = append(h.outputExtra, outputExtra{
|
||||
json: json,
|
||||
output: fn,
|
||||
})
|
||||
}
|
||||
|
||||
// SetLevel sets a new log level, returning the old one.
|
||||
func (h *OutputHandler) SetLevel(level slog.Level) slog.Level {
|
||||
oldLevel := h.levelVar.Level()
|
||||
h.levelVar.Set(level)
|
||||
return oldLevel
|
||||
}
|
||||
|
||||
// Set the writer for the log to that passed.
|
||||
func (h *OutputHandler) setWriter(writer io.Writer) {
|
||||
h.writer = writer
|
||||
}
|
||||
|
||||
// Set the format flags to that passed in.
|
||||
func (h *OutputHandler) setFormat(format logFormat) {
|
||||
h.format = format
|
||||
}
|
||||
|
||||
// clear format flags that this output type doesn't want
|
||||
func (h *OutputHandler) clearFormatFlags(bitMask logFormat) {
|
||||
h.format &^= bitMask
|
||||
}
|
||||
|
||||
// set format flags that this output type requires
|
||||
func (h *OutputHandler) setFormatFlags(bitMask logFormat) {
|
||||
h.format |= bitMask
|
||||
}
|
||||
|
||||
// Enabled returns whether this logger is enabled for this level.
|
||||
func (h *OutputHandler) Enabled(_ context.Context, level slog.Level) bool {
|
||||
minLevel := slog.LevelInfo
|
||||
if h.opts.Level != nil {
|
||||
minLevel = h.opts.Level.Level()
|
||||
}
|
||||
return level >= minLevel
|
||||
}
|
||||
|
||||
// Create a log header in Go standard log format.
|
||||
func (h *OutputHandler) formatStdLogHeader(buf *bytes.Buffer, level slog.Level, t time.Time, object string, lineInfo string) {
|
||||
// Add time in Go standard format if requested
|
||||
if h.format&(logFormatDate|logFormatTime|logFormatMicroseconds) != 0 {
|
||||
if h.format&logFormatUTC != 0 {
|
||||
t = t.UTC()
|
||||
}
|
||||
if h.format&logFormatDate != 0 {
|
||||
year, month, day := t.Date()
|
||||
fmt.Fprintf(buf, "%04d/%02d/%02d ", year, month, day)
|
||||
}
|
||||
if h.format&(logFormatTime|logFormatMicroseconds) != 0 {
|
||||
hour, min, sec := t.Clock()
|
||||
fmt.Fprintf(buf, "%02d:%02d:%02d", hour, min, sec)
|
||||
if h.format&logFormatMicroseconds != 0 {
|
||||
fmt.Fprintf(buf, ".%06d", t.Nanosecond()/1e3)
|
||||
}
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
// Add source code filename:line if requested
|
||||
if h.format&(logFormatShortFile|logFormatLongFile) != 0 && lineInfo != "" {
|
||||
buf.WriteString(lineInfo)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
// Add PID if requested
|
||||
if h.format&logFormatPid != 0 {
|
||||
fmt.Fprintf(buf, "[%d] ", os.Getpid())
|
||||
}
|
||||
// Add log level if required
|
||||
if h.format&logFormatNoLevel == 0 {
|
||||
levelStr := slogLevelToString(level)
|
||||
fmt.Fprintf(buf, "%-6s: ", levelStr)
|
||||
}
|
||||
// Add object if passed
|
||||
if object != "" {
|
||||
buf.WriteString(object)
|
||||
buf.WriteByte(':')
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
// Create a log in standard Go log format into buf.
|
||||
func (h *OutputHandler) textLog(ctx context.Context, buf *bytes.Buffer, r slog.Record) error {
|
||||
var lineInfo string
|
||||
if h.format&(logFormatShortFile|logFormatLongFile) != 0 {
|
||||
lineInfo = getCaller(2)
|
||||
}
|
||||
|
||||
var object string
|
||||
r.Attrs(func(attr slog.Attr) bool {
|
||||
if attr.Key == "object" {
|
||||
object = attr.Value.String()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
h.formatStdLogHeader(buf, r.Level, r.Time, object, lineInfo)
|
||||
buf.WriteString(r.Message)
|
||||
if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { // Ensure newline
|
||||
buf.WriteByte('\n')
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a log in JSON format into buf.
|
||||
func (h *OutputHandler) jsonLog(ctx context.Context, buf *bytes.Buffer, r slog.Record) (err error) {
|
||||
// Call the JSON handler to create the JSON in buf
|
||||
r.AddAttrs(
|
||||
slog.String("source", getCaller(2)),
|
||||
)
|
||||
h.mu.Lock()
|
||||
err = h.jsonHandler.Handle(ctx, r)
|
||||
if err == nil {
|
||||
_, err = h.jsonBuf.WriteTo(buf)
|
||||
}
|
||||
h.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle outputs a log in the current format
|
||||
func (h *OutputHandler) Handle(ctx context.Context, r slog.Record) (err error) {
|
||||
var (
|
||||
bufJSON *bytes.Buffer
|
||||
bufText *bytes.Buffer
|
||||
buf *bytes.Buffer
|
||||
)
|
||||
|
||||
// Check whether we need to build Text or JSON logs or both
|
||||
needJSON := h.format&logFormatJSON != 0
|
||||
needText := !needJSON
|
||||
for _, out := range h.outputExtra {
|
||||
if out.json {
|
||||
needJSON = true
|
||||
} else {
|
||||
needText = true
|
||||
}
|
||||
}
|
||||
|
||||
if needJSON {
|
||||
var bufJSONBack [256]byte
|
||||
bufJSON = bytes.NewBuffer(bufJSONBack[:0])
|
||||
err = h.jsonLog(ctx, bufJSON, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if needText {
|
||||
var bufTextBack [256]byte
|
||||
bufText = bytes.NewBuffer(bufTextBack[:0])
|
||||
err = h.textLog(ctx, bufText, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
|
||||
// Do the log, either to the default destination or to the alternate logging system
|
||||
if h.format&logFormatJSON != 0 {
|
||||
buf = bufJSON
|
||||
} else {
|
||||
buf = bufText
|
||||
}
|
||||
if len(h.output) > 0 {
|
||||
h.output[len(h.output)-1](r.Level, buf.String())
|
||||
err = nil
|
||||
} else {
|
||||
_, err = h.writer.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// Log to any additional destinations required
|
||||
for _, out := range h.outputExtra {
|
||||
if out.json {
|
||||
out.output(r.Level, bufJSON.String())
|
||||
} else {
|
||||
out.output(r.Level, bufText.String())
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WithAttrs creates a new handler with the same writer, options, and flags.
|
||||
// Attributes are ignored for the output format of this specific handler.
|
||||
func (h *OutputHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
return NewOutputHandler(h.writer, &h.opts, h.format)
|
||||
}
|
||||
|
||||
// WithGroup creates a new handler with the same writer, options, and flags.
|
||||
// Groups are ignored for the output format of this specific handler.
|
||||
func (h *OutputHandler) WithGroup(name string) slog.Handler {
|
||||
return NewOutputHandler(h.writer, &h.opts, h.format)
|
||||
}
|
||||
|
||||
// Check interface
|
||||
var _ slog.Handler = (*OutputHandler)(nil)
|
||||
264
fs/log/slog_test.go
Normal file
264
fs/log/slog_test.go
Normal file
@@ -0,0 +1,264 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"log/slog"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
utcPlusOne = time.FixedZone("UTC+1", 1*60*60)
|
||||
t0 = time.Date(2020, 1, 2, 3, 4, 5, 123456000, utcPlusOne)
|
||||
)
|
||||
|
||||
// Test slogLevelToString covers all mapped levels and an unknown level.
|
||||
func TestSlogLevelToString(t *testing.T) {
|
||||
tests := []struct {
|
||||
level slog.Level
|
||||
want string
|
||||
}{
|
||||
{slog.LevelDebug, "DEBUG"},
|
||||
{slog.LevelInfo, "INFO"},
|
||||
{fs.SlogLevelNotice, "NOTICE"},
|
||||
{slog.LevelWarn, "WARNING"},
|
||||
{slog.LevelError, "ERROR"},
|
||||
{fs.SlogLevelCritical, "CRITICAL"},
|
||||
{fs.SlogLevelAlert, "ALERT"},
|
||||
{fs.SlogLevelEmergency, "EMERGENCY"},
|
||||
// Unknown level should fall back to .String()
|
||||
{slog.Level(1234), slog.Level(1234).String()},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
got := slogLevelToString(tc.level)
|
||||
assert.Equal(t, tc.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
// Test mapLogLevelNames replaces only the LevelKey attr and lowercases it.
|
||||
func TestMapLogLevelNames(t *testing.T) {
|
||||
a := slog.Any(slog.LevelKey, slog.LevelWarn)
|
||||
mapped := mapLogLevelNames(nil, a)
|
||||
val, ok := mapped.Value.Any().(string)
|
||||
if !ok || val != "warning" {
|
||||
t.Errorf("mapLogLevelNames did not lowercase level: got %v", mapped.Value.Any())
|
||||
}
|
||||
// non-level attr should remain unchanged
|
||||
other := slog.String("foo", "bar")
|
||||
out := mapLogLevelNames(nil, other)
|
||||
assert.Equal(t, out.Value, other.Value, "mapLogLevelNames changed a non-level attr")
|
||||
}
|
||||
|
||||
// Test getCaller returns a file:line string of the correct form.
|
||||
func TestGetCaller(t *testing.T) {
|
||||
out := getCaller(0)
|
||||
assert.NotEqual(t, "", out)
|
||||
match := regexp.MustCompile(`^([^:]+):(\d+)$`).FindStringSubmatch(out)
|
||||
assert.NotNil(t, match)
|
||||
// Can't test this as it skips the /log/ directory!
|
||||
// assert.Equal(t, "slog_test.go", match[1])
|
||||
}
|
||||
|
||||
// Test formatStdLogHeader for various flag combinations.
|
||||
func TestFormatStdLogHeader(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
format logFormat
|
||||
lineInfo string
|
||||
object string
|
||||
wantPrefix string
|
||||
}{
|
||||
{"dateTime", logFormatDate | logFormatTime, "", "", "2020/01/02 03:04:05 "},
|
||||
{"time", logFormatTime, "", "", "03:04:05 "},
|
||||
{"date", logFormatDate, "", "", "2020/01/02 "},
|
||||
{"dateTimeUTC", logFormatDate | logFormatTime | logFormatUTC, "", "", "2020/01/02 02:04:05 "},
|
||||
{"dateTimeMicro", logFormatDate | logFormatTime | logFormatMicroseconds, "", "", "2020/01/02 03:04:05.123456 "},
|
||||
{"micro", logFormatMicroseconds, "", "", "03:04:05.123456 "},
|
||||
{"shortFile", logFormatShortFile, "foo.go:10", "03:04:05 ", "foo.go:10: "},
|
||||
{"longFile", logFormatLongFile, "foo.go:10", "03:04:05 ", "foo.go:10: "},
|
||||
{"timePID", logFormatPid, "", "", fmt.Sprintf("[%d] ", os.Getpid())},
|
||||
{"levelObject", 0, "", "myobj", "INFO : myobj: "},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
h := &OutputHandler{format: tc.format}
|
||||
buf := &bytes.Buffer{}
|
||||
h.formatStdLogHeader(buf, slog.LevelInfo, t0, tc.object, tc.lineInfo)
|
||||
if !strings.HasPrefix(buf.String(), tc.wantPrefix) {
|
||||
t.Errorf("%s: got %q; want prefix %q", tc.name, buf.String(), tc.wantPrefix)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test Enabled honors the HandlerOptions.Level.
|
||||
func TestEnabled(t *testing.T) {
|
||||
h := NewOutputHandler(&bytes.Buffer{}, nil, 0)
|
||||
assert.True(t, h.Enabled(context.Background(), slog.LevelInfo))
|
||||
assert.False(t, h.Enabled(context.Background(), slog.LevelDebug))
|
||||
|
||||
opts := &slog.HandlerOptions{Level: slog.LevelDebug}
|
||||
h2 := NewOutputHandler(&bytes.Buffer{}, opts, 0)
|
||||
assert.True(t, h2.Enabled(context.Background(), slog.LevelDebug))
|
||||
}
|
||||
|
||||
// Test clearFormatFlags and setFormatFlags bitwise ops.
|
||||
func TestClearSetFormatFlags(t *testing.T) {
|
||||
h := &OutputHandler{format: logFormatDate | logFormatTime}
|
||||
|
||||
h.clearFormatFlags(logFormatTime)
|
||||
assert.True(t, h.format&logFormatTime == 0)
|
||||
|
||||
h.setFormatFlags(logFormatMicroseconds)
|
||||
assert.True(t, h.format&logFormatMicroseconds != 0)
|
||||
}
|
||||
|
||||
// Test SetOutput and ResetOutput override the default writer.
|
||||
func TestSetResetOutput(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
h := NewOutputHandler(buf, nil, 0)
|
||||
var gotOverride string
|
||||
out := func(_ slog.Level, txt string) {
|
||||
gotOverride = txt
|
||||
}
|
||||
|
||||
h.SetOutput(out)
|
||||
r := slog.NewRecord(t0, slog.LevelInfo, "hello", 0)
|
||||
require.NoError(t, h.Handle(context.Background(), r))
|
||||
assert.NotEqual(t, "", gotOverride)
|
||||
require.Equal(t, "", buf.String())
|
||||
|
||||
h.ResetOutput()
|
||||
require.NoError(t, h.Handle(context.Background(), r))
|
||||
require.NotEqual(t, "", buf.String())
|
||||
}
|
||||
|
||||
// Test AddOutput sends to extra destinations.
|
||||
func TestAddOutput(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime)
|
||||
var extraText string
|
||||
out := func(_ slog.Level, txt string) {
|
||||
extraText = txt
|
||||
}
|
||||
|
||||
h.AddOutput(false, out)
|
||||
|
||||
r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
|
||||
require.NoError(t, h.Handle(context.Background(), r))
|
||||
assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", buf.String())
|
||||
assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText)
|
||||
}
|
||||
|
||||
// Test AddOutputJSON sends JSON to extra destinations.
|
||||
func TestAddOutputJSON(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime)
|
||||
var extraText string
|
||||
out := func(_ slog.Level, txt string) {
|
||||
extraText = txt
|
||||
}
|
||||
|
||||
h.AddOutput(true, out)
|
||||
|
||||
r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
|
||||
require.NoError(t, h.Handle(context.Background(), r))
|
||||
assert.NotEqual(t, "", extraText)
|
||||
assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", buf.String())
|
||||
assert.True(t, strings.HasPrefix(extraText, `{"time":"2020-01-02T03:04:05.123456+01:00","level":"info","msg":"world","source":"`))
|
||||
assert.True(t, strings.HasSuffix(extraText, "\"}\n"))
|
||||
}
|
||||
|
||||
// Test AddOutputUseJSONLog sends text to extra destinations.
|
||||
func TestAddOutputUseJSONLog(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime|logFormatJSON)
|
||||
var extraText string
|
||||
out := func(_ slog.Level, txt string) {
|
||||
extraText = txt
|
||||
}
|
||||
|
||||
h.AddOutput(false, out)
|
||||
|
||||
r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
|
||||
require.NoError(t, h.Handle(context.Background(), r))
|
||||
assert.NotEqual(t, "", extraText)
|
||||
assert.True(t, strings.HasPrefix(buf.String(), `{"time":"2020-01-02T03:04:05.123456+01:00","level":"info","msg":"world","source":"`))
|
||||
assert.True(t, strings.HasSuffix(buf.String(), "\"}\n"))
|
||||
assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText)
|
||||
}
|
||||
|
||||
// Test WithAttrs and WithGroup return new handlers with same settings.
|
||||
func TestWithAttrsAndGroup(t *testing.T) {
|
||||
buf := &bytes.Buffer{}
|
||||
h := NewOutputHandler(buf, nil, logFormatDate)
|
||||
h2 := h.WithAttrs([]slog.Attr{slog.String("k", "v")})
|
||||
if _, ok := h2.(*OutputHandler); !ok {
|
||||
t.Error("WithAttrs returned wrong type")
|
||||
}
|
||||
h3 := h.WithGroup("grp")
|
||||
if _, ok := h3.(*OutputHandler); !ok {
|
||||
t.Error("WithGroup returned wrong type")
|
||||
}
|
||||
}
|
||||
|
||||
// Test textLog and jsonLog directly for basic correctness.
|
||||
func TestTextLogAndJsonLog(t *testing.T) {
|
||||
h := NewOutputHandler(&bytes.Buffer{}, nil, logFormatDate|logFormatTime)
|
||||
r := slog.NewRecord(t0, slog.LevelWarn, "msg!", 0)
|
||||
r.AddAttrs(slog.String("object", "obj"))
|
||||
|
||||
// textLog
|
||||
bufText := &bytes.Buffer{}
|
||||
require.NoError(t, h.textLog(context.Background(), bufText, r))
|
||||
out := bufText.String()
|
||||
if !strings.Contains(out, "WARNING") || !strings.Contains(out, "obj:") || !strings.HasSuffix(out, "\n") {
|
||||
t.Errorf("textLog output = %q", out)
|
||||
}
|
||||
|
||||
// jsonLog
|
||||
bufJSON := &bytes.Buffer{}
|
||||
require.NoError(t, h.jsonLog(context.Background(), bufJSON, r))
|
||||
j := bufJSON.String()
|
||||
if !strings.Contains(j, `"level":"warning"`) || !strings.Contains(j, `"msg":"msg!"`) {
|
||||
t.Errorf("jsonLog output = %q", j)
|
||||
}
|
||||
}
|
||||
|
||||
// Table-driven test for JSON vs text Handle behavior.
|
||||
func TestHandleFormatFlags(t *testing.T) {
|
||||
r := slog.NewRecord(t0, slog.LevelInfo, "hi", 0)
|
||||
cases := []struct {
|
||||
name string
|
||||
format logFormat
|
||||
wantJSON bool
|
||||
}{
|
||||
{"textMode", 0, false},
|
||||
{"jsonMode", logFormatJSON, true},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
buf := &bytes.Buffer{}
|
||||
h := NewOutputHandler(buf, nil, tc.format)
|
||||
require.NoError(t, h.Handle(context.Background(), r))
|
||||
out := buf.String()
|
||||
if tc.wantJSON {
|
||||
if !strings.HasPrefix(out, "{") || !strings.Contains(out, `"level":"info"`) {
|
||||
t.Errorf("%s: got %q; want JSON", tc.name, out)
|
||||
}
|
||||
} else {
|
||||
if !strings.Contains(out, "INFO") {
|
||||
t.Errorf("%s: got %q; want text INFO", tc.name, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
// Starts syslog if configured, returns true if it was started
|
||||
func startSysLog() bool {
|
||||
func startSysLog(handler *OutputHandler) bool {
|
||||
fs.Fatalf(nil, "--syslog not supported on %s platform", runtime.GOOS)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"log"
|
||||
"log/slog"
|
||||
"log/syslog"
|
||||
"os"
|
||||
"path"
|
||||
@@ -39,7 +39,7 @@ var (
|
||||
)
|
||||
|
||||
// Starts syslog
|
||||
func startSysLog() bool {
|
||||
func startSysLog(handler *OutputHandler) bool {
|
||||
facility, ok := syslogFacilityMap[Opt.SyslogFacility]
|
||||
if !ok {
|
||||
fs.Fatalf(nil, "Unknown syslog facility %q - man syslog for list", Opt.SyslogFacility)
|
||||
@@ -49,27 +49,27 @@ func startSysLog() bool {
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "Failed to start syslog: %v", err)
|
||||
}
|
||||
log.SetFlags(0)
|
||||
log.SetOutput(w)
|
||||
fs.LogOutput = func(level fs.LogLevel, text string) {
|
||||
handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid)
|
||||
handler.setFormatFlags(logFormatNoLevel)
|
||||
handler.SetOutput(func(level slog.Level, text string) {
|
||||
switch level {
|
||||
case fs.LogLevelEmergency:
|
||||
case fs.SlogLevelEmergency:
|
||||
_ = w.Emerg(text)
|
||||
case fs.LogLevelAlert:
|
||||
case fs.SlogLevelAlert:
|
||||
_ = w.Alert(text)
|
||||
case fs.LogLevelCritical:
|
||||
case fs.SlogLevelCritical:
|
||||
_ = w.Crit(text)
|
||||
case fs.LogLevelError:
|
||||
case slog.LevelError:
|
||||
_ = w.Err(text)
|
||||
case fs.LogLevelWarning:
|
||||
case slog.LevelWarn:
|
||||
_ = w.Warning(text)
|
||||
case fs.LogLevelNotice:
|
||||
case fs.SlogLevelNotice:
|
||||
_ = w.Notice(text)
|
||||
case fs.LogLevelInfo:
|
||||
case slog.LevelInfo:
|
||||
_ = w.Info(text)
|
||||
case fs.LogLevelDebug:
|
||||
case slog.LevelDebug:
|
||||
_ = w.Debug(text)
|
||||
}
|
||||
}
|
||||
})
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
// Enables systemd logs if configured or if auto-detected
|
||||
func startSystemdLog() bool {
|
||||
func startSystemdLog(handler *OutputHandler) bool {
|
||||
fs.Fatalf(nil, "--log-systemd not supported on %s platform", runtime.GOOS)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -7,54 +7,47 @@ package log
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"log/slog"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/go-systemd/v22/journal"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Enables systemd logs if configured or if auto-detected
|
||||
func startSystemdLog() bool {
|
||||
flagsStr := "," + Opt.Format + ","
|
||||
var flags int
|
||||
if strings.Contains(flagsStr, ",longfile,") {
|
||||
flags |= log.Llongfile
|
||||
}
|
||||
if strings.Contains(flagsStr, ",shortfile,") {
|
||||
flags |= log.Lshortfile
|
||||
}
|
||||
log.SetFlags(flags)
|
||||
func startSystemdLog(handler *OutputHandler) bool {
|
||||
handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid)
|
||||
handler.setFormatFlags(logFormatNoLevel)
|
||||
// TODO: Use the native journal.Print approach rather than a custom implementation
|
||||
fs.LogOutput = func(level fs.LogLevel, text string) {
|
||||
handler.SetOutput(func(level slog.Level, text string) {
|
||||
text = fmt.Sprintf("<%s>%-6s: %s", systemdLogPrefix(level), level, text)
|
||||
_ = log.Output(4, text)
|
||||
}
|
||||
})
|
||||
return true
|
||||
}
|
||||
|
||||
var logLevelToSystemdPrefix = []journal.Priority{
|
||||
fs.LogLevelEmergency: journal.PriEmerg,
|
||||
fs.LogLevelAlert: journal.PriAlert,
|
||||
fs.LogLevelCritical: journal.PriCrit,
|
||||
fs.LogLevelError: journal.PriErr,
|
||||
fs.LogLevelWarning: journal.PriWarning,
|
||||
fs.LogLevelNotice: journal.PriNotice,
|
||||
fs.LogLevelInfo: journal.PriInfo,
|
||||
fs.LogLevelDebug: journal.PriDebug,
|
||||
var slogLevelToSystemdPrefix = map[slog.Level]journal.Priority{
|
||||
fs.SlogLevelEmergency: journal.PriEmerg,
|
||||
fs.SlogLevelAlert: journal.PriAlert,
|
||||
fs.SlogLevelCritical: journal.PriCrit,
|
||||
slog.LevelError: journal.PriErr,
|
||||
slog.LevelWarn: journal.PriWarning,
|
||||
fs.SlogLevelNotice: journal.PriNotice,
|
||||
slog.LevelInfo: journal.PriInfo,
|
||||
slog.LevelDebug: journal.PriDebug,
|
||||
}
|
||||
|
||||
func systemdLogPrefix(l fs.LogLevel) string {
|
||||
if l >= fs.LogLevel(len(logLevelToSystemdPrefix)) {
|
||||
func systemdLogPrefix(l slog.Level) string {
|
||||
prio, ok := slogLevelToSystemdPrefix[l]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return strconv.Itoa(int(logLevelToSystemdPrefix[l]))
|
||||
return strconv.Itoa(int(prio))
|
||||
}
|
||||
|
||||
func isJournalStream() bool {
|
||||
if usingJournald, _ := journal.StderrIsJournalStream(); usingJournald {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
@@ -60,9 +61,9 @@ type Marcher interface {
|
||||
// Note: this will flag filter-aware backends on the source side
|
||||
func (m *March) init(ctx context.Context) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
m.srcListDir = m.makeListDir(ctx, m.Fsrc, m.SrcIncludeAll)
|
||||
m.srcListDir = m.makeListDir(ctx, m.Fsrc, m.SrcIncludeAll, m.srcKey)
|
||||
if !m.NoTraverse {
|
||||
m.dstListDir = m.makeListDir(ctx, m.Fdst, m.DstIncludeAll)
|
||||
m.dstListDir = m.makeListDir(ctx, m.Fdst, m.DstIncludeAll, m.dstKey)
|
||||
}
|
||||
// Now create the matching transform
|
||||
// ..normalise the UTF8 first
|
||||
@@ -80,13 +81,26 @@ func (m *March) init(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
// key turns a directory entry into a sort key using the defined transforms.
|
||||
func (m *March) key(entry fs.DirEntry) string {
|
||||
// srcKey turns a directory entry into a sort key using the defined transforms.
|
||||
func (m *March) srcKey(entry fs.DirEntry) string {
|
||||
if entry == nil {
|
||||
return ""
|
||||
}
|
||||
name := path.Base(entry.Remote())
|
||||
for _, transform := range m.transforms {
|
||||
name = transform.Path(m.Ctx, name, fs.DirEntryType(entry) == "directory")
|
||||
return transforms(name, m.transforms)
|
||||
}
|
||||
|
||||
// dstKey turns a directory entry into a sort key using the defined transforms.
|
||||
func (m *March) dstKey(entry fs.DirEntry) string {
|
||||
if entry == nil {
|
||||
return ""
|
||||
}
|
||||
return transforms(path.Base(entry.Remote()), m.transforms)
|
||||
}
|
||||
|
||||
func transforms(name string, transforms []matchTransformFn) string {
|
||||
for _, transform := range transforms {
|
||||
name = transform(name)
|
||||
}
|
||||
return name
|
||||
@@ -95,14 +109,14 @@ func (m *March) key(entry fs.DirEntry) string {
|
||||
// makeListDir makes constructs a listing function for the given fs
|
||||
// and includeAll flags for marching through the file system.
|
||||
// Note: this will optionally flag filter-aware backends!
|
||||
func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listDirFn {
|
||||
func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool, keyFn list.KeyFn) listDirFn {
|
||||
ci := fs.GetConfig(ctx)
|
||||
fi := filter.GetConfig(ctx)
|
||||
if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and
|
||||
!(ci.NoTraverse && fi.HaveFilesFrom()) { // !(--files-from and --no-traverse)
|
||||
return func(dir string, callback fs.ListRCallback) (err error) {
|
||||
dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List
|
||||
return list.DirSortedFn(dirCtx, f, includeAll, dir, callback, m.key)
|
||||
return list.DirSortedFn(dirCtx, f, includeAll, dir, callback, keyFn)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -137,7 +151,7 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listD
|
||||
// in syncing as it will use the first entry for the sync
|
||||
// comparison.
|
||||
slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
|
||||
return cmp.Compare(m.key(a), m.key(b))
|
||||
return cmp.Compare(keyFn(a), keyFn(b))
|
||||
})
|
||||
return callback(entries)
|
||||
}
|
||||
@@ -290,11 +304,11 @@ func (m *March) matchListings(srcChan, dstChan <-chan fs.DirEntry, srcOnly, dstO
|
||||
// Reload src and dst if needed - we set them to nil if used
|
||||
if src == nil {
|
||||
src = <-srcChan
|
||||
srcName = m.key(src)
|
||||
srcName = m.srcKey(src)
|
||||
}
|
||||
if dst == nil {
|
||||
dst = <-dstChan
|
||||
dstName = m.key(dst)
|
||||
dstName = m.dstKey(dst)
|
||||
}
|
||||
if src == nil && dst == nil {
|
||||
break
|
||||
@@ -399,7 +413,7 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
|
||||
if m.NoTraverse && !m.NoCheckDest {
|
||||
originalSrcChan := srcChan
|
||||
srcChan = make(chan fs.DirEntry, 100)
|
||||
ls, err := list.NewSorter(m.Ctx, m.Fdst, list.SortToChan(dstChan), m.key)
|
||||
ls, err := list.NewSorter(m.Ctx, m.Fdst, list.SortToChan(dstChan), m.dstKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -449,7 +463,6 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
|
||||
noDst: true,
|
||||
})
|
||||
}
|
||||
|
||||
}, func(dst fs.DirEntry) {
|
||||
recurse := m.Callback.DstOnly(dst)
|
||||
if recurse && job.dstDepth > 0 {
|
||||
|
||||
@@ -491,7 +491,11 @@ func TestMatchListings(t *testing.T) {
|
||||
// Make a channel to send the source (0) or dest (1) using a list.Sorter
|
||||
makeChan := func(offset int) <-chan fs.DirEntry {
|
||||
out := make(chan fs.DirEntry)
|
||||
ls, err := list.NewSorter(ctx, nil, list.SortToChan(out), m.key)
|
||||
key := m.dstKey
|
||||
if offset == 0 {
|
||||
key = m.srcKey
|
||||
}
|
||||
ls, err := list.NewSorter(ctx, nil, list.SortToChan(out), key)
|
||||
require.NoError(t, err)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
|
||||
@@ -6,12 +6,11 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
@@ -65,18 +64,16 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
|
||||
check := func(i int, wantErrors int64, wantChecks int64, oneway bool, wantOutput map[string]string) {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
accounting.GlobalStats().ResetCounters()
|
||||
var buf bytes.Buffer
|
||||
log.SetOutput(&buf)
|
||||
defer func() {
|
||||
log.SetOutput(os.Stderr)
|
||||
}()
|
||||
opt := operations.CheckOpt{
|
||||
Fdst: r.Fremote,
|
||||
Fsrc: r.Flocal,
|
||||
OneWay: oneway,
|
||||
}
|
||||
addBuffers(&opt)
|
||||
err := checkFunction(ctx, &opt)
|
||||
var err error
|
||||
buf := bilib.CaptureOutput(func() {
|
||||
err = checkFunction(ctx, &opt)
|
||||
})
|
||||
gotErrors := accounting.GlobalStats().GetErrors()
|
||||
gotChecks := accounting.GlobalStats().GetChecks()
|
||||
if wantErrors == 0 && err != nil {
|
||||
@@ -88,7 +85,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
|
||||
if wantErrors != gotErrors {
|
||||
t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
|
||||
}
|
||||
if gotChecks > 0 && !strings.Contains(buf.String(), "matching files") {
|
||||
if gotChecks > 0 && !strings.Contains(string(buf), "matching files") {
|
||||
t.Errorf("%d: Total files matching line missing", i)
|
||||
}
|
||||
if wantChecks != gotChecks {
|
||||
@@ -389,9 +386,6 @@ func testCheckSum(t *testing.T, download bool) {
|
||||
|
||||
checkRun := func(runNo, wantChecks, wantErrors int, want wantType) {
|
||||
accounting.GlobalStats().ResetCounters()
|
||||
buf := new(bytes.Buffer)
|
||||
log.SetOutput(buf)
|
||||
defer log.SetOutput(os.Stderr)
|
||||
|
||||
opt := operations.CheckOpt{
|
||||
Combined: new(bytes.Buffer),
|
||||
@@ -401,8 +395,10 @@ func testCheckSum(t *testing.T, download bool) {
|
||||
MissingOnSrc: new(bytes.Buffer),
|
||||
MissingOnDst: new(bytes.Buffer),
|
||||
}
|
||||
err := operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download)
|
||||
|
||||
var err error
|
||||
buf := bilib.CaptureOutput(func() {
|
||||
err = operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download)
|
||||
})
|
||||
gotErrors := int(accounting.GlobalStats().GetErrors())
|
||||
if wantErrors == 0 {
|
||||
assert.NoError(t, err, "unexpected error in run %d", runNo)
|
||||
@@ -414,7 +410,7 @@ func testCheckSum(t *testing.T, download bool) {
|
||||
|
||||
gotChecks := int(accounting.GlobalStats().GetChecks())
|
||||
if wantChecks > 0 || gotChecks > 0 {
|
||||
assert.Contains(t, buf.String(), "matching files", "missing matching files in run %d", runNo)
|
||||
assert.Contains(t, string(buf), "matching files", "missing matching files in run %d", runNo)
|
||||
}
|
||||
assert.Equal(t, wantChecks, gotChecks, "wrong number of checks in run %d", runNo)
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
)
|
||||
|
||||
// State of the copy
|
||||
@@ -390,7 +391,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
|
||||
f: f,
|
||||
dstFeatures: f.Features(),
|
||||
dst: dst,
|
||||
remote: remote,
|
||||
remote: transform.Path(ctx, remote, false),
|
||||
src: src,
|
||||
ci: ci,
|
||||
tr: tr,
|
||||
@@ -399,7 +400,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
|
||||
}
|
||||
c.hashType, c.hashOption = CommonHash(ctx, f, src.Fs())
|
||||
if c.dst != nil {
|
||||
c.remote = c.dst.Remote()
|
||||
c.remote = transform.Path(ctx, c.dst.Remote(), false)
|
||||
}
|
||||
// Are we using partials?
|
||||
//
|
||||
@@ -414,5 +415,5 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
|
||||
|
||||
// CopyFile moves a single file possibly to a new name
|
||||
func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
|
||||
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true)
|
||||
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true, false)
|
||||
}
|
||||
|
||||
@@ -39,6 +39,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
@@ -424,6 +425,8 @@ func MoveTransfer(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string,
|
||||
|
||||
// move - see Move for help
|
||||
func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object, isTransfer bool) (newDst fs.Object, err error) {
|
||||
origRemote := remote // avoid double-transform on fallback to copy
|
||||
remote = transform.Path(ctx, remote, false)
|
||||
ci := fs.GetConfig(ctx)
|
||||
var tr *accounting.Transfer
|
||||
if isTransfer {
|
||||
@@ -447,12 +450,14 @@ func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
|
||||
if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && (fdst.Features().ServerSideAcrossConfigs || ci.ServerSideAcrossConfigs))) {
|
||||
// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
|
||||
if dst != nil {
|
||||
remote = dst.Remote()
|
||||
remote = transform.Path(ctx, dst.Remote(), false)
|
||||
if !SameObject(src, dst) {
|
||||
err = DeleteFile(ctx, dst)
|
||||
if err != nil {
|
||||
return newDst, err
|
||||
}
|
||||
} else if src.Remote() == remote {
|
||||
return newDst, nil
|
||||
} else if needsMoveCaseInsensitive(fdst, fdst, remote, src.Remote(), false) {
|
||||
doMove = func(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
return MoveCaseInsensitive(ctx, fdst, fdst, remote, src.Remote(), false, src)
|
||||
@@ -488,7 +493,7 @@ func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
|
||||
}
|
||||
}
|
||||
// Move not found or didn't work so copy dst <- src
|
||||
newDst, err = Copy(ctx, fdst, dst, remote, src)
|
||||
newDst, err = Copy(ctx, fdst, dst, origRemote, src)
|
||||
if err != nil {
|
||||
fs.Errorf(src, "Not deleting source as copy failed: %v", err)
|
||||
return newDst, err
|
||||
@@ -516,24 +521,7 @@ func SuffixName(ctx context.Context, remote string) string {
|
||||
return remote
|
||||
}
|
||||
if ci.SuffixKeepExtension {
|
||||
var (
|
||||
base = remote
|
||||
exts = ""
|
||||
first = true
|
||||
ext = path.Ext(remote)
|
||||
)
|
||||
for ext != "" {
|
||||
// Look second and subsequent extensions in mime types.
|
||||
// If they aren't found then don't keep it as an extension.
|
||||
if !first && mime.TypeByExtension(ext) == "" {
|
||||
break
|
||||
}
|
||||
base = base[:len(base)-len(ext)]
|
||||
exts = ext + exts
|
||||
first = false
|
||||
ext = path.Ext(base)
|
||||
}
|
||||
return base + ci.Suffix + exts
|
||||
return transform.SuffixKeepExtension(remote, ci.Suffix)
|
||||
}
|
||||
return remote + ci.Suffix
|
||||
}
|
||||
@@ -1994,12 +1982,12 @@ func MoveCaseInsensitive(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileNam
|
||||
}
|
||||
|
||||
// moveOrCopyFile moves or copies a single file possibly to a new name
|
||||
func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
|
||||
func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool, allowOverlap bool) (err error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
logger, usingLogger := GetLogger(ctx)
|
||||
dstFilePath := path.Join(fdst.Root(), dstFileName)
|
||||
srcFilePath := path.Join(fsrc.Root(), srcFileName)
|
||||
if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
|
||||
if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath && !allowOverlap {
|
||||
fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
|
||||
if usingLogger {
|
||||
srcObj, _ := fsrc.NewObject(ctx, srcFileName)
|
||||
@@ -2106,7 +2094,14 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
|
||||
//
|
||||
// This is treated as a transfer.
|
||||
func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
|
||||
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)
|
||||
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false, false)
|
||||
}
|
||||
|
||||
// TransformFile transforms a file in place using --name-transform
|
||||
//
|
||||
// This is treated as a transfer.
|
||||
func TransformFile(ctx context.Context, fdst fs.Fs, srcFileName string) (err error) {
|
||||
return moveOrCopyFile(ctx, fdst, fdst, srcFileName, srcFileName, false, true)
|
||||
}
|
||||
|
||||
// SetTier changes tier of object in remote
|
||||
@@ -2211,50 +2206,10 @@ func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {
|
||||
|
||||
// AddModTime adds file's Mod Time to output
|
||||
func (l *ListFormat) AddModTime(timeFormat string) {
|
||||
switch timeFormat {
|
||||
case "":
|
||||
if timeFormat == "" {
|
||||
timeFormat = "2006-01-02 15:04:05"
|
||||
case "Layout":
|
||||
timeFormat = time.Layout
|
||||
case "ANSIC":
|
||||
timeFormat = time.ANSIC
|
||||
case "UnixDate":
|
||||
timeFormat = time.UnixDate
|
||||
case "RubyDate":
|
||||
timeFormat = time.RubyDate
|
||||
case "RFC822":
|
||||
timeFormat = time.RFC822
|
||||
case "RFC822Z":
|
||||
timeFormat = time.RFC822Z
|
||||
case "RFC850":
|
||||
timeFormat = time.RFC850
|
||||
case "RFC1123":
|
||||
timeFormat = time.RFC1123
|
||||
case "RFC1123Z":
|
||||
timeFormat = time.RFC1123Z
|
||||
case "RFC3339":
|
||||
timeFormat = time.RFC3339
|
||||
case "RFC3339Nano":
|
||||
timeFormat = time.RFC3339Nano
|
||||
case "Kitchen":
|
||||
timeFormat = time.Kitchen
|
||||
case "Stamp":
|
||||
timeFormat = time.Stamp
|
||||
case "StampMilli":
|
||||
timeFormat = time.StampMilli
|
||||
case "StampMicro":
|
||||
timeFormat = time.StampMicro
|
||||
case "StampNano":
|
||||
timeFormat = time.StampNano
|
||||
case "DateTime":
|
||||
// timeFormat = time.DateTime // missing in go1.19
|
||||
timeFormat = "2006-01-02 15:04:05"
|
||||
case "DateOnly":
|
||||
// timeFormat = time.DateOnly // missing in go1.19
|
||||
timeFormat = "2006-01-02"
|
||||
case "TimeOnly":
|
||||
// timeFormat = time.TimeOnly // missing in go1.19
|
||||
timeFormat = "15:04:05"
|
||||
} else {
|
||||
timeFormat = transform.TimeFormat(timeFormat)
|
||||
}
|
||||
l.AppendOutput(func(entry *ListJSONItem) string {
|
||||
return entry.ModTime.When.Local().Format(timeFormat)
|
||||
|
||||
@@ -62,7 +62,7 @@ func rcList(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
if rc.NotErrParamNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
var list = []*ListJSONItem{}
|
||||
list := []*ListJSONItem{}
|
||||
err = ListJSON(ctx, f, remote, &opt, func(item *ListJSONItem) error {
|
||||
list = append(list, item)
|
||||
return nil
|
||||
@@ -193,7 +193,7 @@ func rcMoveOrCopyFile(ctx context.Context, in rc.Params, cp bool) (out rc.Params
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, moveOrCopyFile(ctx, dstFs, srcFs, dstRemote, srcRemote, cp)
|
||||
return nil, moveOrCopyFile(ctx, dstFs, srcFs, dstRemote, srcRemote, cp, false)
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -289,7 +289,6 @@ func rcSingleCommand(ctx context.Context, in rc.Params, name string, noRemote bo
|
||||
|
||||
var request *http.Request
|
||||
request, err := in.GetHTTPRequest()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -629,12 +628,12 @@ func rcBackend(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var opt = map[string]string{}
|
||||
opt := map[string]string{}
|
||||
err = in.GetStructMissingOK("opt", &opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var arg = []string{}
|
||||
arg := []string{}
|
||||
err = in.GetStructMissingOK("arg", &arg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -642,7 +641,6 @@ func rcBackend(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
result, err := doCommand(ctx, command, arg, opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("command %q failed: %w", command, err)
|
||||
|
||||
}
|
||||
out = make(rc.Params)
|
||||
out["result"] = result
|
||||
@@ -685,7 +683,6 @@ func rcDu(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
dir, err := in.GetString("dir")
|
||||
if rc.IsErrParamNotFound(err) {
|
||||
dir = config.GetCacheDir()
|
||||
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/march"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/errcount"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -95,6 +96,7 @@ type syncCopyMove struct {
|
||||
setDirModTimes []setDirModTime // directories that need their modtime set
|
||||
setDirModTimesMaxLevel int // max level of the directories to set
|
||||
modifiedDirs map[string]struct{} // dirs with changed contents (if s.setDirModTimeAfter)
|
||||
allowOverlap bool // whether we allow src and dst to overlap (i.e. for convmv)
|
||||
}
|
||||
|
||||
// For keeping track of delayed modtime sets
|
||||
@@ -126,8 +128,8 @@ func (strategy trackRenamesStrategy) leaf() bool {
|
||||
return (strategy & trackRenamesStrategyLeaf) != 0
|
||||
}
|
||||
|
||||
func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
|
||||
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.OverlappingFilterCheck(ctx, fdst, fsrc) {
|
||||
func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool, allowOverlap bool) (*syncCopyMove, error) {
|
||||
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.OverlappingFilterCheck(ctx, fdst, fsrc) && !allowOverlap {
|
||||
return nil, fserrors.FatalError(fs.ErrorOverlapping)
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
@@ -161,6 +163,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
|
||||
setDirModTime: (!ci.NoUpdateDirModTime && fsrc.Features().CanHaveEmptyDirectories) && (fdst.Features().WriteDirSetModTime || fdst.Features().MkdirMetadata != nil || fdst.Features().DirSetModTime != nil),
|
||||
setDirModTimeAfter: !ci.NoUpdateDirModTime && (!copyEmptySrcDirs || fsrc.Features().CanHaveEmptyDirectories && fdst.Features().DirModTimeUpdatesOnWrite),
|
||||
modifiedDirs: make(map[string]struct{}),
|
||||
allowOverlap: allowOverlap,
|
||||
}
|
||||
|
||||
s.logger, s.usingLogger = operations.GetLogger(ctx)
|
||||
@@ -922,7 +925,7 @@ func (s *syncCopyMove) tryRename(src fs.Object) bool {
|
||||
//
|
||||
// dir is the start directory, "" for root
|
||||
func (s *syncCopyMove) run() error {
|
||||
if operations.Same(s.fdst, s.fsrc) {
|
||||
if operations.Same(s.fdst, s.fsrc) && !s.allowOverlap {
|
||||
fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
|
||||
return nil
|
||||
}
|
||||
@@ -1122,6 +1125,9 @@ func (s *syncCopyMove) copyDirMetadata(ctx context.Context, f fs.Fs, dst fs.Dire
|
||||
newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
|
||||
}
|
||||
}
|
||||
if transform.Transforming(ctx) && newDst != nil && src.Remote() != newDst.Remote() {
|
||||
s.markParentNotEmpty(src)
|
||||
}
|
||||
// If we need to set modtime after and we created a dir, then save it for later
|
||||
if s.setDirModTime && s.setDirModTimeAfter && err == nil {
|
||||
if newDst != nil {
|
||||
@@ -1254,8 +1260,8 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
|
||||
s.logger(s.ctx, operations.MissingOnDst, src, nil, fs.ErrorIsDir)
|
||||
|
||||
// Create the directory and make sure the Metadata/ModTime is correct
|
||||
s.copyDirMetadata(s.ctx, s.fdst, nil, x.Remote(), x)
|
||||
s.markDirModified(x.Remote())
|
||||
s.copyDirMetadata(s.ctx, s.fdst, nil, transform.Path(s.ctx, x.Remote(), true), x)
|
||||
s.markDirModified(transform.Path(s.ctx, x.Remote(), true))
|
||||
return true
|
||||
default:
|
||||
panic("Bad object in DirEntries")
|
||||
@@ -1288,7 +1294,11 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse
|
||||
}
|
||||
case fs.Directory:
|
||||
// Do the same thing to the entire contents of the directory
|
||||
s.markParentNotEmpty(src)
|
||||
srcX = fs.NewOverrideDirectory(srcX, transform.Path(ctx, src.Remote(), true))
|
||||
src = srcX
|
||||
if !transform.Transforming(ctx) || src.Remote() != dst.Remote() {
|
||||
s.markParentNotEmpty(src)
|
||||
}
|
||||
dstX, ok := dst.(fs.Directory)
|
||||
if ok {
|
||||
s.logger(s.ctx, operations.Match, src, dst, fs.ErrorIsDir)
|
||||
@@ -1327,7 +1337,7 @@ func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse
|
||||
// If DoMove is true then files will be moved instead of copied.
|
||||
//
|
||||
// dir is the start directory, "" for root
|
||||
func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
||||
func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool, allowOverlap bool) error {
|
||||
ci := fs.GetConfig(ctx)
|
||||
if deleteMode != fs.DeleteModeOff && DoMove {
|
||||
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
|
||||
@@ -1338,7 +1348,7 @@ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
|
||||
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
|
||||
}
|
||||
// only delete stuff during in this pass
|
||||
do, err := newSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
|
||||
do, err := newSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs, allowOverlap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1349,7 +1359,7 @@ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
|
||||
// Next pass does a copy only
|
||||
deleteMode = fs.DeleteModeOff
|
||||
}
|
||||
do, err := newSyncCopyMove(ctx, fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
|
||||
do, err := newSyncCopyMove(ctx, fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs, allowOverlap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1359,17 +1369,22 @@ func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
|
||||
// Sync fsrc into fdst
|
||||
func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
|
||||
ci := fs.GetConfig(ctx)
|
||||
return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs)
|
||||
return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs, false)
|
||||
}
|
||||
|
||||
// CopyDir copies fsrc into fdst
|
||||
func CopyDir(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
|
||||
return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
|
||||
return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs, false)
|
||||
}
|
||||
|
||||
// moveDir moves fsrc into fdst
|
||||
func moveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
||||
return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
|
||||
return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs, false)
|
||||
}
|
||||
|
||||
// Transform renames fdst in place
|
||||
func Transform(ctx context.Context, fdst fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
||||
return runSyncCopyMove(ctx, fdst, fdst, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs, true)
|
||||
}
|
||||
|
||||
// MoveDir moves fsrc into fdst
|
||||
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
@@ -2980,7 +2981,7 @@ func predictDstFromLogger(ctx context.Context) context.Context {
|
||||
if winner.Err != nil {
|
||||
errMsg = ";" + winner.Err.Error()
|
||||
}
|
||||
operations.SyncFprintf(opt.JSON, "%s;%s;%v;%s%s\n", file.ModTime(ctx).Local().Format(timeFormat), checksum, file.Size(), file.Remote(), errMsg)
|
||||
operations.SyncFprintf(opt.JSON, "%s;%s;%v;%s%s\n", file.ModTime(ctx).Local().Format(timeFormat), checksum, file.Size(), transform.Path(ctx, file.Remote(), false), errMsg) // TODO: should the transform be handled in the sync instead of here?
|
||||
}
|
||||
}
|
||||
return operations.WithSyncLogger(ctx, opt)
|
||||
|
||||
483
fs/sync/sync_transform_test.go
Normal file
483
fs/sync/sync_transform_test.go
Normal file
@@ -0,0 +1,483 @@
|
||||
// Test transform
|
||||
|
||||
package sync
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/all"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/transform"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
var debug = ``
|
||||
|
||||
func TestTransform(t *testing.T) {
|
||||
type args struct {
|
||||
TransformOpt []string
|
||||
TransformBackOpt []string
|
||||
Lossless bool // whether the TransformBackAlgo is always losslessly invertible
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
}{
|
||||
{name: "NFC", args: args{
|
||||
TransformOpt: []string{"nfc"},
|
||||
TransformBackOpt: []string{"nfd"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "NFD", args: args{
|
||||
TransformOpt: []string{"nfd"},
|
||||
TransformBackOpt: []string{"nfc"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "base64", args: args{
|
||||
TransformOpt: []string{"base64encode"},
|
||||
TransformBackOpt: []string{"base64encode"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "prefix", args: args{
|
||||
TransformOpt: []string{"prefix=PREFIX"},
|
||||
TransformBackOpt: []string{"trimprefix=PREFIX"},
|
||||
Lossless: true,
|
||||
}},
|
||||
{name: "suffix", args: args{
|
||||
TransformOpt: []string{"suffix=SUFFIX"},
|
||||
TransformBackOpt: []string{"trimsuffix=SUFFIX"},
|
||||
Lossless: true,
|
||||
}},
|
||||
{name: "truncate", args: args{
|
||||
TransformOpt: []string{"truncate=10"},
|
||||
TransformBackOpt: []string{"truncate=10"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "encoder", args: args{
|
||||
TransformOpt: []string{"encoder=Colon,SquareBracket"},
|
||||
TransformBackOpt: []string{"decoder=Colon,SquareBracket"},
|
||||
Lossless: true,
|
||||
}},
|
||||
{name: "ISO-8859-1", args: args{
|
||||
TransformOpt: []string{"ISO-8859-1"},
|
||||
TransformBackOpt: []string{"ISO-8859-1"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "charmap", args: args{
|
||||
TransformOpt: []string{"all,charmap=ISO-8859-7"},
|
||||
TransformBackOpt: []string{"all,charmap=ISO-8859-7"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "lowercase", args: args{
|
||||
TransformOpt: []string{"all,lowercase"},
|
||||
TransformBackOpt: []string{"all,lowercase"},
|
||||
Lossless: false,
|
||||
}},
|
||||
{name: "ascii", args: args{
|
||||
TransformOpt: []string{"all,ascii"},
|
||||
TransformBackOpt: []string{"all,ascii"},
|
||||
Lossless: false,
|
||||
}},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
|
||||
ctx := context.Background()
|
||||
r.Mkdir(ctx, r.Flocal)
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
items := makeTestFiles(t, r, "dir1")
|
||||
deleteDSStore(t, r)
|
||||
r.CheckRemoteListing(t, items, nil)
|
||||
r.CheckLocalListing(t, items, nil)
|
||||
|
||||
err := transform.SetOptions(ctx, tt.args.TransformOpt...)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
assert.NoError(t, err)
|
||||
compareNames(ctx, t, r, items)
|
||||
|
||||
err = transform.SetOptions(ctx, tt.args.TransformBackOpt...)
|
||||
require.NoError(t, err)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
assert.NoError(t, err)
|
||||
compareNames(ctx, t, r, items)
|
||||
|
||||
if tt.args.Lossless {
|
||||
deleteDSStore(t, r)
|
||||
r.CheckRemoteItems(t, items...)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const alphabet = "abcdefg123456789"
|
||||
|
||||
var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"}
|
||||
|
||||
func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
|
||||
t.Helper()
|
||||
n := 0
|
||||
// Create test files
|
||||
items := []fstest.Item{}
|
||||
for _, c := range alphabet {
|
||||
var out strings.Builder
|
||||
for i := rune(0); i < 7; i++ {
|
||||
out.WriteRune(c + i)
|
||||
}
|
||||
fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
|
||||
fileName = strings.ToValidUTF8(fileName, "")
|
||||
fileName = strings.NewReplacer(":", "", "<", "", ">", "", "?", "").Replace(fileName) // remove characters illegal on windows
|
||||
|
||||
if debug != "" {
|
||||
fileName = debug
|
||||
}
|
||||
|
||||
item := r.WriteObject(context.Background(), fileName, fileName, t1)
|
||||
r.WriteFile(fileName, fileName, t1)
|
||||
items = append(items, item)
|
||||
n++
|
||||
|
||||
if debug != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, extra := range extras {
|
||||
item := r.WriteObject(context.Background(), extra, extra, t1)
|
||||
r.WriteFile(extra, extra, t1)
|
||||
items = append(items, item)
|
||||
}
|
||||
|
||||
return items
|
||||
}
|
||||
|
||||
func deleteDSStore(t *testing.T, r *fstest.Run) {
|
||||
ctxDSStore, fi := filter.AddConfig(context.Background())
|
||||
err := fi.AddRule(`+ *.DS_Store`)
|
||||
assert.NoError(t, err)
|
||||
err = fi.AddRule(`- **`)
|
||||
assert.NoError(t, err)
|
||||
err = operations.Delete(ctxDSStore, r.Fremote)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fstest.Item) {
|
||||
var entries fs.DirEntries
|
||||
|
||||
deleteDSStore(t, r)
|
||||
err := walk.ListR(context.Background(), r.Fremote, "", true, -1, walk.ListObjects, func(e fs.DirEntries) error {
|
||||
entries = append(entries, e...)
|
||||
return nil
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
entries = slices.DeleteFunc(entries, func(E fs.DirEntry) bool { // remove those pesky .DS_Store files
|
||||
if strings.Contains(E.Remote(), ".DS_Store") {
|
||||
err := operations.DeleteFile(context.Background(), E.(fs.Object))
|
||||
assert.NoError(t, err)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
require.Equal(t, len(items), entries.Len())
|
||||
|
||||
// sort by CONVERTED name
|
||||
slices.SortStableFunc(items, func(a, b fstest.Item) int {
|
||||
aConv := transform.Path(ctx, a.Path, false)
|
||||
bConv := transform.Path(ctx, b.Path, false)
|
||||
return cmp.Compare(aConv, bConv)
|
||||
})
|
||||
slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
|
||||
return cmp.Compare(a.Remote(), b.Remote())
|
||||
})
|
||||
|
||||
for i, e := range entries {
|
||||
expect := transform.Path(ctx, items[i].Path, false)
|
||||
msg := fmt.Sprintf("expected %v, got %v", detectEncoding(expect), detectEncoding(e.Remote()))
|
||||
assert.Equal(t, expect, e.Remote(), msg)
|
||||
}
|
||||
}
|
||||
|
||||
func detectEncoding(s string) string {
|
||||
if norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
|
||||
return "BOTH"
|
||||
}
|
||||
if !norm.NFC.IsNormalString(s) && norm.NFD.IsNormalString(s) {
|
||||
return "NFD"
|
||||
}
|
||||
if norm.NFC.IsNormalString(s) && !norm.NFD.IsNormalString(s) {
|
||||
return "NFC"
|
||||
}
|
||||
return "OTHER"
|
||||
}
|
||||
|
||||
func TestTransformCopy(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "all,suffix_keep_extension=_somesuffix")
|
||||
require.NoError(t, err)
|
||||
file1 := r.WriteFile("sub dir/hello world.txt", "hello world", t1)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("sub dir_somesuffix/hello world_somesuffix.txt", "hello world", t1))
|
||||
}
|
||||
|
||||
func TestDoubleTransform(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "all,prefix=tac", "all,prefix=tic")
|
||||
require.NoError(t, err)
|
||||
file1 := r.WriteFile("toe/toe", "hello world", t1)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("tictactoe/tictactoe", "hello world", t1))
|
||||
}
|
||||
|
||||
func TestFileTag(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "file,prefix=tac", "file,prefix=tic")
|
||||
require.NoError(t, err)
|
||||
file1 := r.WriteFile("toe/toe/toe", "hello world", t1)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("toe/toe/tictactoe", "hello world", t1))
|
||||
}
|
||||
|
||||
func TestNoTag(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "prefix=tac", "prefix=tic")
|
||||
require.NoError(t, err)
|
||||
file1 := r.WriteFile("toe/toe/toe", "hello world", t1)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("toe/toe/tictactoe", "hello world", t1))
|
||||
}
|
||||
|
||||
func TestDirTag(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "dir,prefix=tac", "dir,prefix=tic")
|
||||
require.NoError(t, err)
|
||||
r.WriteFile("toe/toe/toe.txt", "hello world", t1)
|
||||
_, err = operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t1)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalListing(t, []fstest.Item{fstest.NewItem("toe/toe/toe.txt", "hello world", t1)}, []string{"empty_dir", "toe", "toe/toe"})
|
||||
r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("tictactoe/tictactoe/toe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe"})
|
||||
}
|
||||
|
||||
func TestAllTag(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "all,prefix=tac", "all,prefix=tic")
|
||||
require.NoError(t, err)
|
||||
r.WriteFile("toe/toe/toe.txt", "hello world", t1)
|
||||
_, err = operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t1)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalListing(t, []fstest.Item{fstest.NewItem("toe/toe/toe.txt", "hello world", t1)}, []string{"empty_dir", "toe", "toe/toe"})
|
||||
r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("tictactoe/tictactoe/tictactoe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe"})
|
||||
err = operations.Check(ctx, &operations.CheckOpt{Fsrc: r.Flocal, Fdst: r.Fremote}) // should not error even though dst has transformed names
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestRunTwice(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "dir,prefix=tac", "dir,prefix=tic")
|
||||
require.NoError(t, err)
|
||||
file1 := r.WriteFile("toe/toe/toe.txt", "hello world", t1)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("tictactoe/tictactoe/toe.txt", "hello world", t1))
|
||||
|
||||
// result should not change second time, since src is unchanged
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("tictactoe/tictactoe/toe.txt", "hello world", t1))
|
||||
}
|
||||
|
||||
func TestSyntax(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
err := transform.SetOptions(ctx, "prefix")
|
||||
assert.Error(t, err) // should error as required value is missing
|
||||
|
||||
err = transform.SetOptions(ctx, "banana")
|
||||
assert.Error(t, err) // should error as unrecognized option
|
||||
|
||||
err = transform.SetOptions(ctx, "=123")
|
||||
assert.Error(t, err) // should error as required key is missing
|
||||
|
||||
err = transform.SetOptions(ctx, "prefix=123")
|
||||
assert.NoError(t, err) // should not error
|
||||
}
|
||||
|
||||
func TestConflicting(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "prefix=tac", "trimprefix=tac")
|
||||
require.NoError(t, err)
|
||||
file1 := r.WriteFile("toe/toe/toe", "hello world", t1)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
// should result in no change as prefix and trimprefix cancel out
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("toe/toe/toe", "hello world", t1))
|
||||
}
|
||||
|
||||
func TestMove(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "all,prefix=tac", "all,prefix=tic")
|
||||
require.NoError(t, err)
|
||||
r.WriteFile("toe/toe/toe.txt", "hello world", t1)
|
||||
_, err = operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t1)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = MoveDir(ctx, r.Fremote, r.Flocal, true, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalListing(t, []fstest.Item{}, []string{})
|
||||
r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("tictactoe/tictactoe/tictactoe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe"})
|
||||
}
|
||||
|
||||
func TestTransformFile(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "all,prefix=tac", "all,prefix=tic")
|
||||
require.NoError(t, err)
|
||||
r.WriteFile("toe/toe/toe.txt", "hello world", t1)
|
||||
_, err = operations.MkdirModTime(ctx, r.Flocal, "empty_dir", t1)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = MoveDir(ctx, r.Fremote, r.Flocal, true, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalListing(t, []fstest.Item{}, []string{})
|
||||
r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("tictactoe/tictactoe/tictactoe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe"})
|
||||
|
||||
err = transform.SetOptions(ctx, "all,trimprefix=tic", "all,trimprefix=tac")
|
||||
require.NoError(t, err)
|
||||
err = operations.TransformFile(ctx, r.Fremote, "tictactoe/tictactoe/tictactoe.txt")
|
||||
require.NoError(t, err)
|
||||
r.CheckLocalListing(t, []fstest.Item{}, []string{})
|
||||
r.CheckRemoteListing(t, []fstest.Item{fstest.NewItem("toe/toe/toe.txt", "hello world", t1)}, []string{"tictacempty_dir", "tictactoe", "tictactoe/tictactoe", "toe", "toe/toe"})
|
||||
}
|
||||
|
||||
func TestBase64(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "all,base64encode")
|
||||
require.NoError(t, err)
|
||||
file1 := r.WriteFile("toe/toe/toe.txt", "hello world", t1)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("dG9l/dG9l/dG9lLnR4dA==", "hello world", t1))
|
||||
|
||||
// round trip
|
||||
err = transform.SetOptions(ctx, "all,base64decode")
|
||||
require.NoError(t, err)
|
||||
ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Flocal, r.Fremote, true)
|
||||
testLoggerVsLsf(ctx, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
require.NoError(t, err)
|
||||
|
||||
r.CheckLocalItems(t, file1)
|
||||
r.CheckRemoteItems(t, fstest.NewItem("dG9l/dG9l/dG9lLnR4dA==", "hello world", t1))
|
||||
}
|
||||
|
||||
func TestError(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
err := transform.SetOptions(ctx, "all,prefix=ta/c") // has illegal character
|
||||
require.NoError(t, err)
|
||||
file1 := r.WriteFile("toe/toe/toe", "hello world", t1)
|
||||
|
||||
r.Mkdir(ctx, r.Fremote)
|
||||
// ctx = predictDstFromLogger(ctx)
|
||||
err = Sync(ctx, r.Fremote, r.Flocal, true)
|
||||
// testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t)
|
||||
assert.Error(t, err)
|
||||
|
||||
r.CheckLocalListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
|
||||
r.CheckRemoteListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
|
||||
}
|
||||
4
go.mod
4
go.mod
@@ -66,7 +66,6 @@ require (
|
||||
github.com/rivo/uniseg v0.4.7
|
||||
github.com/rogpeppe/go-internal v1.14.1
|
||||
github.com/shirou/gopsutil/v4 v4.25.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/pflag v1.0.6
|
||||
@@ -138,7 +137,7 @@ require (
|
||||
github.com/cronokirby/saferith v0.33.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.2 // indirect
|
||||
github.com/ebitengine/purego v0.8.3 // indirect
|
||||
github.com/emersion/go-message v0.18.0 // indirect
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
|
||||
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
|
||||
@@ -208,6 +207,7 @@ require (
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
|
||||
github.com/samber/lo v1.47.0 // indirect
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/sony/gobreaker v0.5.0 // indirect
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.13 // indirect
|
||||
|
||||
4
go.sum
4
go.sum
@@ -219,8 +219,8 @@ github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq4
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
|
||||
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/ebitengine/purego v0.8.3 h1:K+0AjQp63JEZTEMZiwsI9g0+hAMNohwUOtY0RPGexmc=
|
||||
github.com/ebitengine/purego v0.8.3/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/emersion/go-message v0.18.0 h1:7LxAXHRpSeoO/Wom3ZApVZYG7c3d17yCScYce8WiXA8=
|
||||
github.com/emersion/go-message v0.18.0/go.mod h1:Zi69ACvzaoV/MBnrxfVBPV3xWEuCmC2nEN39oJF4B8A=
|
||||
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 h1:IbFBtwoTQyw0fIM5xv1HF+Y+3ZijDR839WMulgxCcUY=
|
||||
|
||||
@@ -151,8 +151,8 @@ func init() {
|
||||
alias("Dot", EncodeDot)
|
||||
}
|
||||
|
||||
// validStrings returns all the valid MultiEncoder strings
|
||||
func validStrings() string {
|
||||
// ValidStrings returns all the valid MultiEncoder strings
|
||||
func ValidStrings() string {
|
||||
var out []string
|
||||
for k := range nameToEncoding {
|
||||
out = append(out, k)
|
||||
@@ -192,7 +192,7 @@ func (mask *MultiEncoder) Set(in string) error {
|
||||
} else {
|
||||
i, err := strconv.ParseUint(part, 0, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad encoding %q: possible values are: %s", part, validStrings())
|
||||
return fmt.Errorf("bad encoding %q: possible values are: %s", part, ValidStrings())
|
||||
}
|
||||
out |= MultiEncoder(i)
|
||||
}
|
||||
@@ -313,8 +313,7 @@ func (mask MultiEncoder) Encode(in string) string {
|
||||
}
|
||||
if mask.Has(EncodeAsterisk) { // *
|
||||
switch r {
|
||||
case '*',
|
||||
'*':
|
||||
case '*', '*':
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -346,64 +345,55 @@ func (mask MultiEncoder) Encode(in string) string {
|
||||
}
|
||||
if mask.Has(EncodeQuestion) { // ?
|
||||
switch r {
|
||||
case '?',
|
||||
'?':
|
||||
case '?', '?':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodeColon) { // :
|
||||
switch r {
|
||||
case ':',
|
||||
':':
|
||||
case ':', ':':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodePipe) { // |
|
||||
switch r {
|
||||
case '|',
|
||||
'|':
|
||||
case '|', '|':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodeDoubleQuote) { // "
|
||||
switch r {
|
||||
case '"',
|
||||
'"':
|
||||
case '"', '"':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodeSingleQuote) { // '
|
||||
switch r {
|
||||
case '\'',
|
||||
''':
|
||||
case '\'', ''':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodeBackQuote) { // `
|
||||
switch r {
|
||||
case '`',
|
||||
'`':
|
||||
case '`', '`':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodeDollar) { // $
|
||||
switch r {
|
||||
case '$',
|
||||
'$':
|
||||
case '$', '$':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodeSlash) { // /
|
||||
switch r {
|
||||
case '/',
|
||||
'/':
|
||||
case '/', '/':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodeBackSlash) { // \
|
||||
switch r {
|
||||
case '\\',
|
||||
'\':
|
||||
case '\\', '\':
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -416,15 +406,13 @@ func (mask MultiEncoder) Encode(in string) string {
|
||||
}
|
||||
if mask.Has(EncodeHash) { // #
|
||||
switch r {
|
||||
case '#',
|
||||
'#':
|
||||
case '#', '#':
|
||||
return true
|
||||
}
|
||||
}
|
||||
if mask.Has(EncodePercent) { // %
|
||||
switch r {
|
||||
case '%',
|
||||
'%':
|
||||
case '%', '%':
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -1182,6 +1170,7 @@ func appendQuotedBytes(w io.Writer, s string) {
|
||||
_, _ = fmt.Fprintf(w, string(QuoteRune)+"%02X", b)
|
||||
}
|
||||
}
|
||||
|
||||
func appendUnquotedByte(w io.Writer, s string) bool {
|
||||
if len(s) < 2 {
|
||||
return false
|
||||
@@ -1202,12 +1191,15 @@ func (identity) Decode(in string) string { return in }
|
||||
func (i identity) FromStandardPath(s string) string {
|
||||
return FromStandardPath(i, s)
|
||||
}
|
||||
|
||||
func (i identity) FromStandardName(s string) string {
|
||||
return FromStandardName(i, s)
|
||||
}
|
||||
|
||||
func (i identity) ToStandardPath(s string) string {
|
||||
return ToStandardPath(i, s)
|
||||
}
|
||||
|
||||
func (i identity) ToStandardName(s string) string {
|
||||
return ToStandardName(i, s)
|
||||
}
|
||||
|
||||
71
lib/transform/cmap.go
Normal file
71
lib/transform/cmap.go
Normal file
@@ -0,0 +1,71 @@
|
||||
package transform
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"golang.org/x/text/encoding/charmap"
|
||||
)
|
||||
|
||||
var (
|
||||
cmaps = map[int]*charmap.Charmap{}
|
||||
lock sync.Mutex
|
||||
)
|
||||
|
||||
type cmapChoices struct{}
|
||||
|
||||
func (cmapChoices) Choices() []string {
|
||||
choices := make([]string, 1)
|
||||
i := 0
|
||||
for _, enc := range charmap.All {
|
||||
c, ok := enc.(*charmap.Charmap)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
name := strings.ReplaceAll(c.String(), " ", "-")
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("unknown-%d", i)
|
||||
}
|
||||
lock.Lock()
|
||||
cmaps[i] = c
|
||||
lock.Unlock()
|
||||
choices = append(choices, name)
|
||||
i++
|
||||
}
|
||||
return choices
|
||||
}
|
||||
|
||||
func (cmapChoices) Type() string {
|
||||
return "string"
|
||||
}
|
||||
|
||||
func charmapByID(cm fs.Enum[cmapChoices]) *charmap.Charmap {
|
||||
lock.Lock()
|
||||
c, ok := cmaps[int(cm)]
|
||||
lock.Unlock()
|
||||
if ok {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func encodeWithReplacement(s string, cmap *charmap.Charmap) string {
|
||||
return strings.Map(func(r rune) rune {
|
||||
b, ok := cmap.EncodeRune(r)
|
||||
if !ok {
|
||||
return '_'
|
||||
}
|
||||
return cmap.DecodeByte(b)
|
||||
}, s)
|
||||
}
|
||||
|
||||
func toASCII(s string) string {
|
||||
return strings.Map(func(r rune) rune {
|
||||
if r <= 127 {
|
||||
return r
|
||||
}
|
||||
return -1
|
||||
}, s)
|
||||
}
|
||||
136
lib/transform/help.go
Normal file
136
lib/transform/help.go
Normal file
@@ -0,0 +1,136 @@
|
||||
package transform
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
)
|
||||
|
||||
type commands struct {
|
||||
command string
|
||||
description string
|
||||
}
|
||||
|
||||
type example struct {
|
||||
path string
|
||||
flags []string
|
||||
}
|
||||
|
||||
var commandList = []commands{
|
||||
{command: "--name-transform prefix=XXXX", description: "Prepends XXXX to the file name."},
|
||||
{command: "--name-transform suffix=XXXX", description: "Appends XXXX to the file name after the extension."},
|
||||
{command: "--name-transform suffix_keep_extension=XXXX", description: "Appends XXXX to the file name while preserving the original file extension."},
|
||||
{command: "--name-transform trimprefix=XXXX", description: "Removes XXXX if it appears at the start of the file name."},
|
||||
{command: "--name-transform trimsuffix=XXXX", description: "Removes XXXX if it appears at the end of the file name."},
|
||||
{command: "--name-transform regex=/pattern/replacement/", description: "Applies a regex-based transformation."},
|
||||
{command: "--name-transform replace=old:new", description: "Replaces occurrences of old with new in the file name."},
|
||||
{command: "--name-transform date={YYYYMMDD}", description: "Appends or prefixes the specified date format."},
|
||||
{command: "--name-transform truncate=N", description: "Truncates the file name to a maximum of N characters."},
|
||||
{command: "--name-transform base64encode", description: "Encodes the file name in Base64."},
|
||||
{command: "--name-transform base64decode", description: "Decodes a Base64-encoded file name."},
|
||||
{command: "--name-transform encoder=ENCODING", description: "Converts the file name to the specified encoding (e.g., ISO-8859-1, Windows-1252, Macintosh)."},
|
||||
{command: "--name-transform decoder=ENCODING", description: "Decodes the file name from the specified encoding."},
|
||||
{command: "--name-transform charmap=MAP", description: "Applies a character mapping transformation."},
|
||||
{command: "--name-transform lowercase", description: "Converts the file name to lowercase."},
|
||||
{command: "--name-transform uppercase", description: "Converts the file name to UPPERCASE."},
|
||||
{command: "--name-transform titlecase", description: "Converts the file name to Title Case."},
|
||||
{command: "--name-transform ascii", description: "Strips non-ASCII characters."},
|
||||
{command: "--name-transform url", description: "URL-encodes the file name."},
|
||||
{command: "--name-transform nfc", description: "Converts the file name to NFC Unicode normalization form."},
|
||||
{command: "--name-transform nfd", description: "Converts the file name to NFD Unicode normalization form."},
|
||||
{command: "--name-transform nfkc", description: "Converts the file name to NFKC Unicode normalization form."},
|
||||
{command: "--name-transform nfkd", description: "Converts the file name to NFKD Unicode normalization form."},
|
||||
{command: "--name-transform command=/path/to/my/programfile names.", description: "Executes an external program to transform"},
|
||||
}
|
||||
|
||||
var examples = []example{
|
||||
{"stories/The Quick Brown Fox!.txt", []string{"all,uppercase"}},
|
||||
{"stories/The Quick Brown Fox!.txt", []string{"all,replace=Fox:Turtle", "all,replace=Quick:Slow"}},
|
||||
{"stories/The Quick Brown Fox!.txt", []string{"all,base64encode"}},
|
||||
{"c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", []string{"all,base64decode"}},
|
||||
{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfc"}},
|
||||
{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfd"}},
|
||||
{"stories/The Quick Brown 🦊 Fox!.txt", []string{"all,ascii"}},
|
||||
{"stories/The Quick Brown Fox!.txt", []string{"all,trimsuffix=.txt"}},
|
||||
{"stories/The Quick Brown Fox!.txt", []string{"all,prefix=OLD_"}},
|
||||
{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,charmap=ISO-8859-7"}},
|
||||
{"stories/The Quick Brown Fox: A Memoir [draft].txt", []string{"all,encoder=Colon,SquareBracket"}},
|
||||
{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,truncate=21"}},
|
||||
{"stories/The Quick Brown Fox!.txt", []string{"all,command=echo"}},
|
||||
{"stories/The Quick Brown Fox!", []string{"date=-{YYYYMMDD}"}},
|
||||
{"stories/The Quick Brown Fox!", []string{"date=-{macfriendlytime}"}},
|
||||
{"stories/The Quick Brown Fox!.txt", []string{"all,regex=[\\.\\w]/ab"}},
|
||||
}
|
||||
|
||||
func (e example) command() string {
|
||||
s := fmt.Sprintf(`rclone convmv %q`, e.path)
|
||||
for _, f := range e.flags {
|
||||
s += fmt.Sprintf(" --name-transform %q", f)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (e example) output() string {
|
||||
ctx := context.Background()
|
||||
err := SetOptions(ctx, e.flags...)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "error generating help text: %v", err)
|
||||
}
|
||||
return Path(ctx, e.path, false)
|
||||
}
|
||||
|
||||
// go run ./ convmv --help
|
||||
func sprintExamples() string {
|
||||
s := "Examples: \n\n"
|
||||
for _, e := range examples {
|
||||
s += fmt.Sprintf("```\n%s\n", e.command())
|
||||
s += fmt.Sprintf("// Output: %s\n```\n\n", e.output())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func commandTable() string {
|
||||
s := `| Command | Description |
|
||||
|------|------|`
|
||||
for _, c := range commandList {
|
||||
s += fmt.Sprintf("\n| `%s` | %s |", c.command, c.description)
|
||||
}
|
||||
s += "\n\n\n"
|
||||
return s
|
||||
}
|
||||
|
||||
// SprintList returns the example help text as a string
|
||||
func SprintList() string {
|
||||
var algos transformAlgo
|
||||
var charmaps fs.Enum[cmapChoices]
|
||||
s := commandTable()
|
||||
s += fmt.Sprintln("Conversion modes: \n```")
|
||||
for _, v := range algos.Choices() {
|
||||
s += fmt.Sprintln(v + " ")
|
||||
}
|
||||
s += fmt.Sprintln("```")
|
||||
|
||||
s += fmt.Sprintln("Char maps: \n```")
|
||||
for _, v := range charmaps.Choices() {
|
||||
s += fmt.Sprintln(v + " ")
|
||||
}
|
||||
s += fmt.Sprintln("```")
|
||||
|
||||
s += fmt.Sprintln("Encoding masks: \n```")
|
||||
for _, v := range strings.Split(encoder.ValidStrings(), ",") {
|
||||
s += fmt.Sprintln(v + " ")
|
||||
}
|
||||
s += fmt.Sprintln("```")
|
||||
|
||||
s += sprintExamples()
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// PrintList prints the example help text (see SprintList) to stdout.
func PrintList() {
	fmt.Println(SprintList())
}
248
lib/transform/options.go
Normal file
248
lib/transform/options.go
Normal file
@@ -0,0 +1,248 @@
|
||||
package transform
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// transform is one parsed --name-transform rule.
type transform struct {
	key   transformAlgo // the transform algorithm, for example, "prefix"
	value string        // the algorithm's argument, for example, "some_prefix_"
	tag   tag           // which part of the path it applies to: file, dir, or all
}
// tag controls which part of the file path is affected (file, dir, all)
type tag int

// tag modes
//
// The zero value is file, so untagged flags default to file-only.
const (
	file tag = iota // Only transform the leaf name of files (default)
	dir             // Only transform name of directories - these may appear anywhere in the path
	all             // Transform the entire path for files and directories
)
// Transforming returns true when transforms are in use
|
||||
func Transforming(ctx context.Context) bool {
|
||||
ci := fs.GetConfig(ctx)
|
||||
return len(ci.NameTransform) > 0
|
||||
}
|
||||
|
||||
// SetOptions sets the options in ctx from flags passed in.
|
||||
// Any existing flags will be overwritten.
|
||||
// s should be in the same format as cmd line flags, i.e. "all,prefix=XXX"
|
||||
func SetOptions(ctx context.Context, s ...string) (err error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
ci.NameTransform = s
|
||||
_, err = getOptions(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// cache to minimize re-parsing of the --name-transform flags
//
// cacheLock serializes updates to cachedNameTransform and cachedOpt
// (see updateCache). NOTE(review): getOptions reads these fields
// without taking cacheLock — confirm all callers are single-goroutine
// or take the lock when reading.
var (
	cachedNameTransform []string // the flag strings the cache was built from
	cachedOpt           []transform // the parsed form of cachedNameTransform
	cacheLock           sync.Mutex
)
// getOptions sets the options from flags passed in.
|
||||
func getOptions(ctx context.Context) (opt []transform, err error) {
|
||||
if !Transforming(ctx) {
|
||||
return opt, nil
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
// return cached opt if available
|
||||
if cachedNameTransform != nil && slices.Equal(ci.NameTransform, cachedNameTransform) {
|
||||
return cachedOpt, nil
|
||||
}
|
||||
|
||||
for _, transform := range ci.NameTransform {
|
||||
t, err := parse(transform)
|
||||
if err != nil {
|
||||
return opt, err
|
||||
}
|
||||
opt = append(opt, t)
|
||||
}
|
||||
updateCache(ci.NameTransform, opt)
|
||||
return opt, nil
|
||||
}
|
||||
|
||||
func updateCache(nt []string, o []transform) {
|
||||
cacheLock.Lock()
|
||||
cachedNameTransform = slices.Clone(nt)
|
||||
cachedOpt = o
|
||||
cacheLock.Unlock()
|
||||
}
|
||||
|
||||
// parse a single instance of --name-transform
|
||||
func parse(s string) (t transform, err error) {
|
||||
if s == "" {
|
||||
return t, nil
|
||||
}
|
||||
s = t.parseTag(s)
|
||||
err = t.parseKeyVal(s)
|
||||
return t, err
|
||||
}
|
||||
|
||||
// parse the tag (file/dir/all), set the option accordingly, and return the trimmed string
|
||||
//
|
||||
// we don't worry about errors here because it will error anyway as an invalid key
|
||||
func (t *transform) parseTag(s string) string {
|
||||
if strings.HasPrefix(s, "file,") {
|
||||
t.tag = file
|
||||
return strings.TrimPrefix(s, "file,")
|
||||
}
|
||||
if strings.HasPrefix(s, "dir,") {
|
||||
t.tag = dir
|
||||
return strings.TrimPrefix(s, "dir,")
|
||||
}
|
||||
if strings.HasPrefix(s, "all,") {
|
||||
t.tag = all
|
||||
return strings.TrimPrefix(s, "all,")
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// parse key and value (if any) by splitting on '=' sign
|
||||
// (file/dir/all tag has already been trimmed)
|
||||
func (t *transform) parseKeyVal(s string) (err error) {
|
||||
if !strings.ContainsRune(s, '=') {
|
||||
err = t.key.Set(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if t.requiresValue() {
|
||||
fs.Debugf(nil, "received %v", s)
|
||||
return errors.New("value is required for " + t.key.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
split := strings.Split(s, "=")
|
||||
if len(split) != 2 {
|
||||
return errors.New("too many values")
|
||||
}
|
||||
if split[0] == "" {
|
||||
return errors.New("key cannot be blank")
|
||||
}
|
||||
err = t.key.Set(split[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.value = split[1]
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns true if this particular algorithm requires a value
|
||||
func (t *transform) requiresValue() bool {
|
||||
switch t.key {
|
||||
case ConvFindReplace:
|
||||
return true
|
||||
case ConvPrefix:
|
||||
return true
|
||||
case ConvSuffix:
|
||||
return true
|
||||
case ConvSuffixKeepExtension:
|
||||
return true
|
||||
case ConvTrimPrefix:
|
||||
return true
|
||||
case ConvTrimSuffix:
|
||||
return true
|
||||
case ConvIndex:
|
||||
return true
|
||||
case ConvDate:
|
||||
return true
|
||||
case ConvTruncate:
|
||||
return true
|
||||
case ConvEncoder:
|
||||
return true
|
||||
case ConvDecoder:
|
||||
return true
|
||||
case ConvRegex:
|
||||
return true
|
||||
case ConvCommand:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// transformAlgo describes conversion setting.
//
// It is an alias of fs.Enum so it parses from/formats to the strings
// in transformChoices.Choices.
type transformAlgo = fs.Enum[transformChoices]
// Supported transform options
//
// The declaration order here must match the index order of the strings
// returned by transformChoices.Choices.
const (
	ConvNone transformAlgo = iota
	ConvToNFC
	ConvToNFD
	ConvToNFKC
	ConvToNFKD
	ConvFindReplace
	ConvPrefix
	ConvSuffix
	ConvSuffixKeepExtension
	ConvTrimPrefix
	ConvTrimSuffix
	ConvIndex
	ConvDate
	ConvTruncate
	ConvBase64Encode
	ConvBase64Decode
	ConvEncoder
	ConvDecoder
	ConvISO8859_1
	ConvWindows1252
	ConvMacintosh
	ConvCharmap
	ConvLowercase
	ConvUppercase
	ConvTitlecase
	ConvASCII
	ConvURL
	ConvRegex
	ConvCommand
)
// transformChoices supplies the string names for transformAlgo values
// via the fs.Enum mechanism.
type transformChoices struct{}
// Choices returns the valid strings for the transformAlgo enum.
//
// The slice is indexed by the Conv* constants, so the order here must
// match the constant declaration order.
func (transformChoices) Choices() []string {
	return []string{
		ConvNone:                "none",
		ConvToNFC:               "nfc",
		ConvToNFD:               "nfd",
		ConvToNFKC:              "nfkc",
		ConvToNFKD:              "nfkd",
		ConvFindReplace:         "replace",
		ConvPrefix:              "prefix",
		ConvSuffix:              "suffix",
		ConvSuffixKeepExtension: "suffix_keep_extension",
		ConvTrimPrefix:          "trimprefix",
		ConvTrimSuffix:          "trimsuffix",
		ConvIndex:               "index",
		ConvDate:                "date",
		ConvTruncate:            "truncate",
		ConvBase64Encode:        "base64encode",
		ConvBase64Decode:        "base64decode",
		ConvEncoder:             "encoder",
		ConvDecoder:             "decoder",
		ConvISO8859_1:           "ISO-8859-1",
		ConvWindows1252:         "Windows-1252",
		ConvMacintosh:           "Macintosh",
		ConvCharmap:             "charmap",
		ConvLowercase:           "lowercase",
		ConvUppercase:           "uppercase",
		ConvTitlecase:           "titlecase",
		ConvASCII:               "ascii",
		ConvURL:                 "url",
		ConvRegex:               "regex",
		ConvCommand:             "command",
	}
}
func (transformChoices) Type() string {
|
||||
return "string"
|
||||
}
|
||||
335
lib/transform/transform.go
Normal file
335
lib/transform/transform.go
Normal file
@@ -0,0 +1,335 @@
|
||||
// Package transform holds functions for path name transformations
|
||||
package transform
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"mime"
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"golang.org/x/text/encoding/charmap"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// Path transforms a path s according to the --name-transform options in use
//
// If no transforms are in use, s is returned unchanged
//
// Errors (from flag parsing or from individual transforms) are counted
// and logged rather than returned; processing continues with whatever
// did succeed. If the result has a different number of path segments
// than the input, the whole transformation is rejected and the
// original path is returned.
func Path(ctx context.Context, s string, isDir bool) string {
	if !Transforming(ctx) {
		return s
	}

	old := s
	opt, err := getOptions(ctx)
	if err != nil {
		// carry on with whatever flags parsed successfully
		err = fs.CountError(ctx, err)
		fs.Errorf(s, "Failed to parse transform flags: %v", err)
	}
	for _, t := range opt {
		// file-tagged transforms do not apply to directories
		if isDir && t.tag == file {
			continue
		}
		// for a file, a file-tagged transform touches only the base name
		baseOnly := !isDir && t.tag == file
		if t.tag == dir && !isDir {
			// dir-tagged transform on a file path: transform the parent
			// directories only, leaving the base name alone
			s, err = transformDir(s, t)
		} else {
			s, err = transformPath(s, t, baseOnly)
		}
		if err != nil {
			err = fs.CountError(ctx, err)
			fs.Errorf(s, "Failed to transform: %v", err)
		}
	}
	if old != s {
		fs.Debugf(old, "transformed to: %v", s)
	}
	// refuse any transform that changed the number of path segments
	if strings.Count(old, "/") != strings.Count(s, "/") {
		err = fs.CountError(ctx, fmt.Errorf("number of path segments must match: %v (%v), %v (%v)", old, strings.Count(old, "/"), s, strings.Count(s, "/")))
		fs.Errorf(old, "%v", err)
		return old
	}
	return s
}
|
||||
// transformPath transforms a path string according to the chosen TransformAlgo.
|
||||
// Each path segment is transformed separately, to preserve path separators.
|
||||
// If baseOnly is true, only the base will be transformed (useful for renaming while walking a dir tree recursively.)
|
||||
// for example, "some/nested/path" -> "some/nested/CONVERTEDPATH"
|
||||
// otherwise, the entire is path is transformed.
|
||||
func transformPath(s string, t transform, baseOnly bool) (string, error) {
|
||||
if s == "" || s == "/" || s == "\\" || s == "." {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
if baseOnly {
|
||||
transformedBase, err := transformPathSegment(path.Base(s), t)
|
||||
if err := validateSegment(transformedBase); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return path.Join(path.Dir(s), transformedBase), err
|
||||
}
|
||||
|
||||
segments := strings.Split(s, "/")
|
||||
transformedSegments := make([]string, len(segments))
|
||||
for _, seg := range segments {
|
||||
convSeg, err := transformPathSegment(seg, t)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := validateSegment(convSeg); err != nil {
|
||||
return "", err
|
||||
}
|
||||
transformedSegments = append(transformedSegments, convSeg)
|
||||
}
|
||||
return path.Join(transformedSegments...), nil
|
||||
}
|
||||
|
||||
// transform all but the last path segment
|
||||
func transformDir(s string, t transform) (string, error) {
|
||||
dirPath, err := transformPath(path.Dir(s), t, false)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return path.Join(dirPath, path.Base(s)), nil
|
||||
}
|
||||
|
||||
// transformPathSegment transforms one path segment (or really any string) according to the chosen TransformAlgo.
|
||||
// It assumes path separators have already been trimmed.
|
||||
func transformPathSegment(s string, t transform) (string, error) {
|
||||
switch t.key {
|
||||
case ConvNone:
|
||||
return s, nil
|
||||
case ConvToNFC:
|
||||
return norm.NFC.String(s), nil
|
||||
case ConvToNFD:
|
||||
return norm.NFD.String(s), nil
|
||||
case ConvToNFKC:
|
||||
return norm.NFKC.String(s), nil
|
||||
case ConvToNFKD:
|
||||
return norm.NFKD.String(s), nil
|
||||
case ConvBase64Encode:
|
||||
return base64.URLEncoding.EncodeToString([]byte(s)), nil // URLEncoding to avoid slashes
|
||||
case ConvBase64Decode:
|
||||
if s == ".DS_Store" {
|
||||
return s, nil
|
||||
}
|
||||
b, err := base64.URLEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
fs.Errorf(s, "base64 error")
|
||||
}
|
||||
return string(b), err
|
||||
case ConvFindReplace:
|
||||
split := strings.Split(t.value, ":")
|
||||
if len(split) != 2 {
|
||||
return s, fmt.Errorf("wrong number of values: %v", t.value)
|
||||
}
|
||||
return strings.ReplaceAll(s, split[0], split[1]), nil
|
||||
case ConvPrefix:
|
||||
return t.value + s, nil
|
||||
case ConvSuffix:
|
||||
return s + t.value, nil
|
||||
case ConvSuffixKeepExtension:
|
||||
return SuffixKeepExtension(s, t.value), nil
|
||||
case ConvTrimPrefix:
|
||||
return strings.TrimPrefix(s, t.value), nil
|
||||
case ConvTrimSuffix:
|
||||
return strings.TrimSuffix(s, t.value), nil
|
||||
case ConvTruncate:
|
||||
max, err := strconv.Atoi(t.value)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
if max <= 0 {
|
||||
return s, nil
|
||||
}
|
||||
if utf8.RuneCountInString(s) <= max {
|
||||
return s, nil
|
||||
}
|
||||
runes := []rune(s)
|
||||
return string(runes[:max]), nil
|
||||
case ConvEncoder:
|
||||
var enc encoder.MultiEncoder
|
||||
err := enc.Set(t.value)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return enc.Encode(s), nil
|
||||
case ConvDecoder:
|
||||
var enc encoder.MultiEncoder
|
||||
err := enc.Set(t.value)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return enc.Decode(s), nil
|
||||
case ConvISO8859_1:
|
||||
return encodeWithReplacement(s, charmap.ISO8859_1), nil
|
||||
case ConvWindows1252:
|
||||
return encodeWithReplacement(s, charmap.Windows1252), nil
|
||||
case ConvMacintosh:
|
||||
return encodeWithReplacement(s, charmap.Macintosh), nil
|
||||
case ConvCharmap:
|
||||
var cmapType fs.Enum[cmapChoices]
|
||||
err := cmapType.Set(t.value)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
c := charmapByID(cmapType)
|
||||
return encodeWithReplacement(s, c), nil
|
||||
case ConvLowercase:
|
||||
return strings.ToLower(s), nil
|
||||
case ConvUppercase:
|
||||
return strings.ToUpper(s), nil
|
||||
case ConvTitlecase:
|
||||
return strings.ToTitle(s), nil
|
||||
case ConvASCII:
|
||||
return toASCII(s), nil
|
||||
case ConvURL:
|
||||
return url.QueryEscape(s), nil
|
||||
case ConvDate:
|
||||
return s + AppyTimeGlobs(t.value, time.Now()), nil
|
||||
case ConvRegex:
|
||||
split := strings.Split(t.value, "/")
|
||||
if len(split) != 2 {
|
||||
return s, fmt.Errorf("regex syntax error: %v", t.value)
|
||||
}
|
||||
re := regexp.MustCompile(split[0])
|
||||
return re.ReplaceAllString(s, split[1]), nil
|
||||
case ConvCommand:
|
||||
return mapper(s, t.value)
|
||||
default:
|
||||
return "", errors.New("this option is not yet implemented")
|
||||
}
|
||||
}
|
||||
|
||||
// SuffixKeepExtension adds a suffix while keeping extension
//
// i.e. file.txt becomes file_somesuffix.txt not file.txt_somesuffix
func SuffixKeepExtension(remote string, suffix string) string {
	base, exts := remote, ""
	for first := true; ; first = false {
		ext := path.Ext(base)
		if ext == "" {
			break
		}
		// Look second and subsequent extensions in mime types.
		// If they aren't found then don't keep it as an extension.
		if !first && mime.TypeByExtension(ext) == "" {
			break
		}
		base = strings.TrimSuffix(base, ext)
		exts = ext + exts
	}
	return base + suffix + exts
}
|
||||
// validateSegment forbids transformations that render a path segment
// empty (or whitespace-only) or that insert path separators.
func validateSegment(s string) error {
	switch {
	case strings.TrimSpace(s) == "":
		return errors.New("transform cannot render path segments empty")
	case strings.ContainsRune(s, '/'):
		return fmt.Errorf("transform cannot add path separators: %v", s)
	}
	return nil
}
||||
// ParseGlobs determines whether a string contains {brackets}
// and returns the substring (including both brackets) for replacing
// substring is first opening bracket to last closing bracket --
// good for {{this}} but not {this}{this}
func ParseGlobs(s string) (hasGlobs bool, substring string) {
	start := strings.IndexByte(s, '{')
	if start < 0 {
		return false, ""
	}
	end := strings.LastIndexByte(s, '}')
	if end <= start {
		return false, ""
	}
	return true, s[start : end+1]
}
||||
// TrimBrackets converts {{this}} to this by trimming any run of
// '{'/'}' characters from both ends of the string.
func TrimBrackets(s string) string {
	const brackets = "{}"
	return strings.Trim(s, brackets)
}
||||
// TimeFormat converts a user-supplied string to a Go time constant, if possible
//
// Unrecognised strings are passed through unchanged so arbitrary Go
// layout strings can also be used directly.
func TimeFormat(timeFormat string) string {
	switch timeFormat {
	case "Layout":
		return time.Layout
	case "ANSIC":
		return time.ANSIC
	case "UnixDate":
		return time.UnixDate
	case "RubyDate":
		return time.RubyDate
	case "RFC822":
		return time.RFC822
	case "RFC822Z":
		return time.RFC822Z
	case "RFC850":
		return time.RFC850
	case "RFC1123":
		return time.RFC1123
	case "RFC1123Z":
		return time.RFC1123Z
	case "RFC3339":
		return time.RFC3339
	case "RFC3339Nano":
		return time.RFC3339Nano
	case "Kitchen":
		return time.Kitchen
	case "Stamp":
		return time.Stamp
	case "StampMilli":
		return time.StampMilli
	case "StampMicro":
		return time.StampMicro
	case "StampNano":
		return time.StampNano
	case "DateTime":
		return time.DateTime
	case "DateOnly":
		return time.DateOnly
	case "TimeOnly":
		return time.TimeOnly
	case "MacFriendlyTime", "macfriendlytime", "mac":
		// not actually a Go constant -- but useful as macOS filenames can't have colons
		return "2006-01-02 0304PM"
	case "YYYYMMDD":
		return "20060102"
	}
	return timeFormat
}
|
||||
// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
|
||||
func AppyTimeGlobs(s string, t time.Time) string {
|
||||
hasGlobs, substring := ParseGlobs(s)
|
||||
if !hasGlobs {
|
||||
return s
|
||||
}
|
||||
timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
|
||||
return strings.ReplaceAll(s, substring, timeString)
|
||||
}
|
||||
|
||||
// mapper runs the user-supplied command with s as its single argument
// and returns the command's trimmed combined output. On failure the
// original string is returned along with an error including the output.
func mapper(s string, command string) (string, error) {
	out, err := exec.Command(command, s).CombinedOutput()
	trimmed := bytes.TrimSpace(out)
	if err != nil {
		return s, fmt.Errorf("%s: error running command %q: %v", trimmed, command+" "+s, err)
	}
	return string(trimmed), nil
}
142
lib/transform/transform_test.go
Normal file
142
lib/transform/transform_test.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package transform
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// sync tests are in fs/sync/sync_transform_test.go to avoid import cycle issues
|
||||
|
||||
func newOptions(s ...string) (context.Context, error) {
|
||||
ctx := context.Background()
|
||||
err := SetOptions(ctx, s...)
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
func TestPath(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"", ""},
|
||||
{"toe/toe/toe", "tictactoe/tictactoe/tictactoe"},
|
||||
{"a/b/c", "tictaca/tictacb/tictacc"},
|
||||
} {
|
||||
ctx, err := newOptions("all,prefix=tac", "all,prefix=tic")
|
||||
require.NoError(t, err)
|
||||
|
||||
got := Path(ctx, test.path, false)
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileTagOnFile(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"a/b/c.txt", "a/b/1c.txt"},
|
||||
} {
|
||||
ctx, err := newOptions("file,prefix=1")
|
||||
require.NoError(t, err)
|
||||
|
||||
got := Path(ctx, test.path, false)
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirTagOnFile(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"a/b/c.txt", "1a/1b/c.txt"},
|
||||
} {
|
||||
ctx, err := newOptions("dir,prefix=1")
|
||||
require.NoError(t, err)
|
||||
|
||||
got := Path(ctx, test.path, false)
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllTag(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"a/b/c.txt", "1a/1b/1c.txt"},
|
||||
} {
|
||||
ctx, err := newOptions("all,prefix=1")
|
||||
require.NoError(t, err)
|
||||
|
||||
got := Path(ctx, test.path, false)
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileTagOnDir(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"a/b", "a/b"},
|
||||
} {
|
||||
ctx, err := newOptions("file,prefix=1")
|
||||
require.NoError(t, err)
|
||||
|
||||
got := Path(ctx, test.path, true)
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDirTagOnDir(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
path string
|
||||
want string
|
||||
}{
|
||||
{"a/b", "1a/1b"},
|
||||
} {
|
||||
ctx, err := newOptions("dir,prefix=1")
|
||||
require.NoError(t, err)
|
||||
|
||||
got := Path(ctx, test.path, true)
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
// TestVarious exercises one representative input through each of the
// supported transform algorithms (case mapping, replace, base64,
// unicode normalization, ascii, url escaping, trims, prefixes,
// charmaps, encoder masks, truncation, external command, date globs
// and regex replacement).
func TestVarious(t *testing.T) {
	for _, test := range []struct {
		path  string
		want  string
		flags []string
	}{
		{"stories/The Quick Brown Fox!.txt", "STORIES/THE QUICK BROWN FOX!.TXT", []string{"all,uppercase"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Slow Brown Turtle!.txt", []string{"all,replace=Fox:Turtle", "all,replace=Quick:Slow"}},
		{"stories/The Quick Brown Fox!.txt", "c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", []string{"all,base64encode"}},
		{"c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0", "stories/The Quick Brown Fox!.txt", []string{"all,base64decode"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfc"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox Went to the Café!.txt", []string{"all,nfd"}},
		{"stories/The Quick Brown 🦊 Fox!.txt", "stories/The Quick Brown Fox!.txt", []string{"all,ascii"}},
		{"stories/The Quick Brown 🦊 Fox!.txt", "stories/The+Quick+Brown+%F0%9F%A6%8A+Fox%21.txt", []string{"all,url"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!", []string{"all,trimsuffix=.txt"}},
		{"stories/The Quick Brown Fox!.txt", "OLD_stories/OLD_The Quick Brown Fox!.txt", []string{"all,prefix=OLD_"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown _ Fox Went to the Caf_!.txt", []string{"all,charmap=ISO-8859-7"}},
		{"stories/The Quick Brown Fox: A Memoir [draft].txt", "stories/The Quick Brown Fox: A Memoir [draft].txt", []string{"all,encoder=Colon,SquareBracket"}},
		{"stories/The Quick Brown 🦊 Fox Went to the Café!.txt", "stories/The Quick Brown 🦊 Fox", []string{"all,truncate=21"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt", []string{"all,command=echo"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt-" + time.Now().Local().Format("20060102"), []string{"date=-{YYYYMMDD}"}},
		{"stories/The Quick Brown Fox!.txt", "stories/The Quick Brown Fox!.txt-" + time.Now().Local().Format("2006-01-02 0304PM"), []string{"date=-{macfriendlytime}"}},
		{"stories/The Quick Brown Fox!.txt", "ababababababab/ababab ababababab ababababab ababab!abababab", []string{"all,regex=[\\.\\w]/ab"}},
	} {
		ctx, err := newOptions(test.flags...)
		require.NoError(t, err)

		got := Path(ctx, test.path, false)
		assert.Equal(t, test.want, got)
	}
}
|
||||
Reference in New Issue
Block a user