1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-28 07:13:39 +00:00

Compare commits

..

7 Commits

Author SHA1 Message Date
Nick Craig-Wood
dbce609665 docs: Fix help string for --name-transform 2025-05-06 15:26:22 +01:00
Nick Craig-Wood
62e8512711 docs: move --max-connections documentation to the correct place 2025-05-06 15:26:10 +01:00
nielash
53bdd58085 lib/transform: add caching support
adds local caching to avoid re-parsing when possible
2025-05-04 07:51:15 -04:00
nielash
7b9f8eca00 lib/transform: refactor and add TimeFormat support 2025-05-04 07:51:15 -04:00
nielash
433ed18e91 convmv: add convmv command
convmv supports advanced path name transformations for converting and renaming
files and directories by applying prefixes, suffixes, and other alterations.

For example:

rclone convmv "stories/The Quick Brown Fox!.txt" --name-transform "all,uppercase"
// Output: STORIES/THE QUICK BROWN FOX!.TXT

See help doc for complete details.
2025-05-04 07:50:34 -04:00
nielash
34a20555ca lib/transform
lib/transform adds the transform library, supporting advanced path name
transformations for converting and renaming files and directories by applying
prefixes, suffixes, and other alterations.

It also adds the --name-transform flag for use with sync, copy, and move.

Multiple transformations can be used in sequence, applied in the order they are
specified on the command line.

By default --name-transform will only apply to file names. This means only the leaf
file name will be transformed. However some of the transforms would be better
applied to the whole path or just directories. To choose which part of the
file path is affected some tags can be added to the --name-transform:

file	Only transform the leaf name of files (DEFAULT)
dir	Only transform name of directories - these may appear anywhere in the path
all	Transform the entire path for files and directories

Example syntax:
--name-transform file,prefix=ABC
--name-transform dir,prefix=DEF
2025-05-04 05:49:44 -04:00
nielash
f20ee1488b march: split src and dst
splits m.key into separate functions for src and dst to prepare for
lib/transform which will want to do transforms on the src side only.

Co-Authored-By: Nick Craig-Wood <nick@craig-wood.com>
2025-05-04 05:21:43 -04:00
30 changed files with 354 additions and 1220 deletions

View File

@@ -572,19 +572,3 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.
This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.
This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).

View File

@@ -43,7 +43,6 @@ var (
errAlbumDelete = errors.New("google photos API does not implement deleting albums")
errRemove = errors.New("google photos API only implements removing files from albums")
errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created")
errReadOnly = errors.New("can't upload files in read only mode")
)
const (
@@ -53,31 +52,19 @@ const (
listChunks = 100 // chunk size to read directory listings
albumChunks = 50 // chunk size to read album listings
minSleep = 10 * time.Millisecond
scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
)
var (
// scopes needed for read write access
scopesReadWrite = []string{
"openid",
"profile",
scopeAppendOnly,
scopeReadOnly,
scopeReadWrite,
}
// scopes needed for read only access
scopesReadOnly = []string{
"openid",
"profile",
scopeReadOnly,
}
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: scopesReadWrite,
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
ClientID: rcloneClientID,
@@ -113,9 +100,9 @@ func init() {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes = scopesReadOnly
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else {
oauthConfig.Scopes = scopesReadWrite
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
@@ -1133,9 +1120,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
if !album.IsWriteable {
if o.fs.opt.ReadOnly {
return errReadOnly
}
return errOwnAlbums
}

View File

@@ -252,14 +252,18 @@ func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.Op
}
resp, err := d.icloud.srv.Call(ctx, opts)
// icloud has some weird http codes
if err != nil && resp != nil && resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
if err != nil {
// icloud has some weird http codes
if resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
}
return resp, err
}
return resp, err
return d.icloud.srv.Call(ctx, opts)
}
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.

View File

@@ -56,7 +56,6 @@ const (
driveTypeSharepoint = "documentLibrary"
defaultChunkSize = 10 * fs.Mebi
chunkSizeMultiple = 320 * fs.Kibi
maxSinglePartSize = 4 * fs.Mebi
regionGlobal = "global"
regionUS = "us"
@@ -139,21 +138,6 @@ func init() {
Help: "Azure and Office 365 operated by Vnet Group in China",
},
},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
This is disabled by default as uploading using single part uploads
causes rclone to use twice the storage on Onedrive business as when
rclone sets the modification time after the upload Onedrive creates a
new version.
See: https://github.com/rclone/rclone/issues/1716
`,
Default: fs.SizeSuffix(-1),
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
@@ -762,7 +746,6 @@ Examples:
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
@@ -1039,13 +1022,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxSinglePartSize {
return fmt.Errorf("%v is greater than %v", cs, maxSinglePartSize)
}
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -1059,10 +1035,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("onedrive: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, fmt.Errorf("onedrive: upload cutoff: %w", err)
}
if opt.DriveID == "" || opt.DriveType == "" {
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -2497,10 +2469,6 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
return false, nil
}
return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
} else if err != nil && resp != nil && resp.StatusCode == http.StatusNotFound {
fs.Debugf(o, "Received 404 error: assuming eventual consistency problem with session - retrying chunk: %v", err)
time.Sleep(5 * time.Second) // a little delay to help things along
return true, err
}
if err != nil {
return shouldRetry(ctx, resp, err)
@@ -2595,8 +2563,8 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
size := src.Size()
if size < 0 || size > int64(maxSinglePartSize) {
return nil, fmt.Errorf("size passed into uploadSinglepart must be >= 0 and <= %v", maxSinglePartSize)
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
}
fs.Debugf(o, "Starting singlepart upload")
@@ -2649,9 +2617,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var info *api.Item
if size > 0 && size >= int64(o.fs.opt.UploadCutoff) {
if size > 0 {
info, err = o.uploadMultipart(ctx, in, src, options...)
} else if size >= 0 {
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, src, options...)
} else {
return errors.New("unknown-sized upload not supported")

View File

@@ -3,22 +3,20 @@ package bilib
import (
"bytes"
"log/slog"
"log"
"github.com/rclone/rclone/fs/log"
"github.com/sirupsen/logrus"
)
// CaptureOutput runs a function capturing its output at log level INFO.
// CaptureOutput runs a function capturing its output.
func CaptureOutput(fun func()) []byte {
logSave := log.Writer()
logrusSave := logrus.StandardLogger().Out
buf := &bytes.Buffer{}
oldLevel := log.Handler.SetLevel(slog.LevelInfo)
log.Handler.SetOutput(func(level slog.Level, text string) {
buf.WriteString(text)
})
defer func() {
log.Handler.ResetOutput()
log.Handler.SetLevel(oldLevel)
}()
log.SetOutput(buf)
logrus.SetOutput(buf)
fun()
log.SetOutput(logSave)
logrus.SetOutput(logrusSave)
return buf.Bytes()
}

View File

@@ -6,8 +6,6 @@ package ncdu
import (
"context"
"fmt"
"log/slog"
"os"
"path"
"reflect"
"sort"
@@ -927,19 +925,23 @@ func (u *UI) Run() error {
return fmt.Errorf("screen init: %w", err)
}
// Hijack log output so that it doesn't corrupt the screen.
if !log.Redirected() {
var logs []string
log.Handler.SetOutput(func(level slog.Level, text string) {
// Hijack fs.LogOutput so that it doesn't corrupt the screen.
if logOutput := fs.LogOutput; !log.Redirected() {
type log struct {
text string
level fs.LogLevel
}
var logs []log
fs.LogOutput = func(level fs.LogLevel, text string) {
if len(logs) > 100 {
logs = logs[len(logs)-100:]
}
logs = append(logs, text)
})
logs = append(logs, log{level: level, text: text})
}
defer func() {
log.Handler.ResetOutput()
for _, text := range logs {
_, _ = os.Stderr.WriteString(text)
fs.LogOutput = logOutput
for i := range logs {
logOutput(logs[i].level, logs[i].text)
}
}()
}

View File

@@ -5,11 +5,11 @@ package cmd
import (
"bytes"
"fmt"
"log/slog"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
@@ -19,6 +19,8 @@ import (
const (
// interval between progress prints
defaultProgressInterval = 500 * time.Millisecond
// time format for logging
logTimeFormat = "2006/01/02 15:04:05"
)
// startProgress starts the progress bar printing
@@ -26,13 +28,15 @@ const (
// It returns a func which should be called to stop the stats.
func startProgress() func() {
stopStats := make(chan struct{})
oldLogOutput := fs.LogOutput
oldSyncPrint := operations.SyncPrintf
if !log.Redirected() {
// Intercept the log calls if not logging to file or syslog
log.Handler.SetOutput(func(level slog.Level, text string) {
printProgress(text)
})
fs.LogOutput = func(level fs.LogLevel, text string) {
printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
}
}
// Intercept output from functions such as HashLister to stdout
@@ -56,10 +60,7 @@ func startProgress() func() {
case <-stopStats:
ticker.Stop()
printProgress("")
if !log.Redirected() {
// Reset intercept of the log calls
log.Handler.ResetOutput()
}
fs.LogOutput = oldLogOutput
operations.SyncPrintf = oldSyncPrint
fmt.Println("")
return

View File

@@ -28,8 +28,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
if entry.IsDir() {
if addPrefix {
prefixWithTrailingSlash := objectPath + "/"
response.AddPrefix(prefixWithTrailingSlash)
response.AddPrefix(objectPath)
continue
}
err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)

View File

@@ -48,7 +48,7 @@ func TestEnvironmentVariables(t *testing.T) {
env = "RCLONE_LOG_LEVEL=DEBUG"
out, err = rcloneEnv(env, "version", "--quiet")
if assert.Error(t, err) {
assert.Contains(t, out, " DEBUG ")
assert.Contains(t, out, " DEBUG : ")
assert.Contains(t, out, "Can't set -q and --log-level")
assert.Contains(t, "exit status 1", err.Error())
}
@@ -329,7 +329,7 @@ func TestEnvironmentVariables(t *testing.T) {
jsonLogOK := func() {
t.Helper()
if assert.NoError(t, err) {
assert.Contains(t, out, `"level":"debug"`)
assert.Contains(t, out, `{"level":"debug",`)
assert.Contains(t, out, `"msg":"Version `)
assert.Contains(t, out, `"}`)
}

View File

@@ -967,9 +967,3 @@ put them back in again.` >}}
* Christian Richter <crichter@owncloud.com> <1058116+dragonchaser@users.noreply.github.com>
* Ralf Haferkamp <r.haferkamp@opencloud.eu>
* Jugal Kishore <me@devjugal.com>
* Tho Neyugn <nguyentruongtho@users.noreply.github.com>
* Ben Boeckel <mathstuf@users.noreply.github.com>
* Clément Wehrung <cwehrung@nurves.com>
* Jeff Geerling <geerlingguy@mac.com>
* Germán Casares <german.casares.march+github@gmail.com>
* fhuber <florian.huber@noris.de>

View File

@@ -5,14 +5,6 @@ description: "Rclone Changelog"
# Changelog
## v1.69.3 - 2025-05-21
[See commits](https://github.com/rclone/rclone/compare/v1.69.2...v1.69.3)
* Bug Fixes
* build: Reapply update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
* build: Update github.com/ebitengine/purego to work around bug in go1.24.3 (Nick Craig-Wood)
## v1.69.2 - 2025-05-01
[See commits](https://github.com/rclone/rclone/compare/v1.69.1...v1.69.2)

View File

@@ -968,9 +968,8 @@ on any OS, and the value is defined as following:
- On Unix: `$HOME` if defined, else by looking up current user in OS-specific user database
(e.g. passwd file), or else use the result from shell command `cd && pwd`.
If you run `rclone config file` you will see where the default location is for
you. Running `rclone config touch` will ensure a configuration file exists,
creating an empty one in the default location if there is none.
If you run `rclone config file` you will see where the default
location is for you.
The fact that an existing file `rclone.conf` in the same directory
as the rclone executable is always preferred, means that it is easy
@@ -981,13 +980,7 @@ same directory.
If the location is set to empty string `""` or path to a file
with name `notfound`, or the os null device represented by value `NUL` on
Windows and `/dev/null` on Unix systems, then rclone will keep the
configuration file in memory only.
You may see a log message "Config file not found - using defaults" if there is
no configuration file. This can be suppressed, e.g. if you are using rclone
entirely with [on the fly remotes](/docs/#backend-path-to-dir), by using
memory-only configuration file or by creating an empty configuration file, as
described above.
config file in memory only.
The file format is basic [INI](https://en.wikipedia.org/wiki/INI_file#Format):
Sections of text, led by a `[section]` header and followed by
@@ -1483,21 +1476,12 @@ have a signal to rotate logs.
### --log-format LIST ###
Comma separated list of log format options. The accepted options are:
- `date` - Add a date in the format YYYY/MM/YY to the log.
- `time` - Add a time to the log in format HH:MM:SS.
- `microseconds` - Add microseconds to the time in format HH:MM:SS.SSSSSS.
- `UTC` - Make the logs in UTC not localtime.
- `longfile` - Adds the source file and line number of the log statement.
- `shortfile` - Adds the source file and line number of the log statement.
- `pid` - Add the process ID to the log - useful with `rclone mount --daemon`.
- `nolevel` - Don't add the level to the log.
- `json` - Equivalent to adding `--use-json-log`
They are added to the log line in the order above.
The default log format is `"date,time"`.
Comma separated list of log format options. Accepted options are `date`,
`time`, `microseconds`, `pid`, `longfile`, `shortfile`, `UTC`. Any other
keywords will be silently ignored. `pid` will tag log messages with process
identifier which is useful with `rclone mount --daemon`. Other accepted
options are explained in the [go documentation](https://pkg.go.dev/log#pkg-constants).
The default log format is "`date`,`time`".
### --log-level LEVEL ###
@@ -1515,90 +1499,10 @@ warnings and significant events.
`ERROR` is equivalent to `-q`. It only outputs error messages.
### --windows-event-log LEVEL ###
If this is configured (the default is `OFF`) then logs of this level
and above will be logged to the Windows event log in **addition** to
the normal logs. These will be logged in JSON format as described
below regardless of what format the main logs are configured for.
The Windows event log only has 3 levels of severity `Info`, `Warning`
and `Error`. If enabled we map rclone levels like this.
- `Error` ← `ERROR` (and above)
- `Warning` ← `WARNING` (note that this level is defined but not currently used).
- `Info` ← `NOTICE`, `INFO` and `DEBUG`.
Rclone will declare its log source as "rclone" if it has enough
permissions to create the registry key needed. If not then logs will
appear as "Application". You can run `rclone version --windows-event-log DEBUG`
once as administrator to create the registry key in advance.
**Note** that the `--windows-event-log` level must be greater (more
severe) than or equal to the `--log-level`. For example to log DEBUG
to a log file but ERRORs to the event log you would use
--log-file rclone.log --log-level DEBUG --windows-event-log ERROR
This option is only supported on Windows platforms.
### --use-json-log ###
This switches the log format to JSON for rclone. The fields of JSON
log are `level`, `msg`, `source`, `time`. The JSON logs will be
printed on a single line, but are shown expanded here for clarity.
```json
{
"time": "2025-05-13T17:30:51.036237518+01:00",
"level": "debug",
"msg": "4 go routines active\n",
"source": "cmd/cmd.go:298"
}
```
Completed data transfer logs will have extra `size` information. Logs
which are about a particular object will have `object` and
`objectType` fields also.
```json
{
"time": "2025-05-13T17:38:05.540846352+01:00",
"level": "info",
"msg": "Copied (new) to: file2.txt",
"size": 6,
"object": "file.txt",
"objectType": "*local.Object",
"source": "operations/copy.go:368"
}
```
Stats logs will contain a `stats` field which is the same as
returned from the rc call [core/stats](/rc/#core-stats).
```json
{
"time": "2025-05-13T17:38:05.540912847+01:00",
"level": "info",
"msg": "...text version of the stats...",
"stats": {
"bytes": 6,
"checks": 0,
"deletedDirs": 0,
"deletes": 0,
"elapsedTime": 0.000904825,
...truncated for clarity...
"totalBytes": 6,
"totalChecks": 0,
"totalTransfers": 1,
"transferTime": 0.000882794,
"transfers": 1
},
"source": "accounting/stats.go:569"
}
```
This switches the log format to JSON for rclone. The fields of json log
are level, msg, source, time.
### --low-level-retries NUMBER ###

View File

@@ -22,20 +22,6 @@ See the [remote setup docs](/remote_setup/) for more info.
This has now been documented in its own [remote setup page](/remote_setup/).
### How can I get rid of the "Config file not found" notice?
If you see a notice like 'NOTICE: Config file "rclone.conf" not found', this
means you have not configured any remotes.
If you need to configure a remote, see the [config help docs](/docs/#configure).
If you are using rclone entirely with [on the fly remotes](/docs/#backend-path-to-dir),
you can create an empty config file to get rid of this notice, for example:
```
rclone config touch
```
### Can rclone sync directly from drive to s3 ###
Rclone can sync between two remote cloud storage systems just fine.

View File

@@ -14,11 +14,6 @@ Google Photos.
limitations, so please read the [limitations section](#limitations)
carefully to make sure it is suitable for your use.
**NB** From March 31, 2025 rclone can only download photos it
uploaded. This limitation is due to policy changes at Google. You may
need to run `rclone config reconnect remote:` to make rclone work
again after upgrading to rclone v1.70.
## Configuration
The initial setup for google cloud storage involves getting a token from Google Photos
@@ -533,11 +528,6 @@ videos or images or formats that Google Photos doesn't understand,
rclone will upload the file, then Google Photos will give an error
when it is turned into a media item.
**NB** From March 31, 2025 rclone can only download photos it
uploaded. This limitation is due to policy changes at Google. You may
need to run `rclone config reconnect remote:` to make rclone work
again after upgrading to rclone v1.70.
Note that all media items uploaded to Google Photos through the API
are stored in full resolution at "original quality" and **will** count
towards your storage quota in your Google Account. The API does

View File

@@ -5551,7 +5551,7 @@ source).
This has the following consequences:
- Using `rclone rcat` will fail as the metadata doesn't match after upload
- Using `rclone rcat` will fail as the medatada doesn't match after upload
- Uploading files with `rclone mount` will fail for the same reason
- This can worked around by using `--vfs-cache-mode writes` or `--vfs-cache-mode full` or setting `--s3-upload-cutoff` large
- Files uploaded via a multipart upload won't have their modtimes

View File

@@ -677,13 +677,9 @@ func init() {
RegisterGlobalOptions(OptionsInfo{Name: "main", Opt: globalConfig, Options: ConfigOptionsInfo, Reload: globalConfig.Reload})
// initial guess at log level from the flags
globalConfig.LogLevel = InitialLogLevel()
globalConfig.LogLevel = initialLogLevel()
}
// LogReload is written by fs/log to set variables which should really
// be there but we can't move due to them being visible here in the rc.
var LogReload = func(*ConfigInfo) error { return nil }
// Reload assumes the config has been edited and does what is necessary to make it live
func (ci *ConfigInfo) Reload(ctx context.Context) error {
// Set -vv if --dump is in use
@@ -697,6 +693,11 @@ func (ci *ConfigInfo) Reload(ctx context.Context) error {
ci.StatsLogLevel = LogLevelNotice
}
// If --use-json-log then start the JSON logger
if ci.UseJSONLog {
InstallJSONLogger(ci.LogLevel)
}
// Check --compare-dest and --copy-dest
if len(ci.CompareDest) > 0 && len(ci.CopyDest) > 0 {
return fmt.Errorf("can't use --compare-dest with --copy-dest")
@@ -736,12 +737,13 @@ func (ci *ConfigInfo) Reload(ctx context.Context) error {
nonZero(&ci.Transfers)
nonZero(&ci.Checkers)
return LogReload(ci)
return nil
}
// InitialLogLevel performs a simple check for debug flags to enable
// debug logging during the flag initialization.
func InitialLogLevel() LogLevel {
// Initial logging level
//
// Perform a simple check for debug flags to enable debug logging during the flag initialization
func initialLogLevel() LogLevel {
logLevel := LogLevelNotice
for argIndex, arg := range os.Args {
if strings.HasPrefix(arg, "-vv") && strings.TrimRight(arg, "v") == "-" {

126
fs/log.go
View File

@@ -4,9 +4,10 @@ import (
"context"
"encoding/json"
"fmt"
"log/slog"
"log"
"os"
"slices"
"github.com/sirupsen/logrus"
)
// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
@@ -32,7 +33,6 @@ const (
LogLevelNotice // Normal logging, -q suppresses
LogLevelInfo // Transfers, needs -v
LogLevelDebug // Debug level, needs -vv
LogLevelOff
)
type logLevelChoices struct{}
@@ -47,7 +47,6 @@ func (logLevelChoices) Choices() []string {
LogLevelNotice: "NOTICE",
LogLevelInfo: "INFO",
LogLevelDebug: "DEBUG",
LogLevelOff: "OFF",
}
}
@@ -55,33 +54,19 @@ func (logLevelChoices) Type() string {
return "LogLevel"
}
// slogLevel definitions defined as slog.Level constants.
// The integer values determine severity for filtering.
// Lower values are less severe (e.g., Debug), higher values are more severe (e.g., Emergency).
// We fit our extra values into slog's scale.
const (
// slog.LevelDebug slog.Level = -4
// slog.LevelInfo slog.Level = 0
SlogLevelNotice = slog.Level(2) // Between Info (0) and Warn (4)
// slog.LevelWarn slog.Level = 4
// slog.LevelError slog.Level = 8
SlogLevelCritical = slog.Level(12) // More severe than Error
SlogLevelAlert = slog.Level(16) // More severe than Critical
SlogLevelEmergency = slog.Level(20) // Most severe
SlogLevelOff = slog.Level(24) // A very high value
)
// LogPrintPid enables process pid in log
var LogPrintPid = false
// Map our level numbers to slog level numbers
var levelToSlog = []slog.Level{
LogLevelEmergency: SlogLevelEmergency,
LogLevelAlert: SlogLevelAlert,
LogLevelCritical: SlogLevelCritical,
LogLevelError: slog.LevelError,
LogLevelWarning: slog.LevelWarn,
LogLevelNotice: SlogLevelNotice,
LogLevelInfo: slog.LevelInfo,
LogLevelDebug: slog.LevelDebug,
LogLevelOff: SlogLevelOff,
// InstallJSONLogger is a hook that --use-json-log calls
var InstallJSONLogger = func(logLevel LogLevel) {}
// LogOutput sends the text to the logger of level
var LogOutput = func(level LogLevel, text string) {
text = fmt.Sprintf("%-6s: %s", level, text)
if LogPrintPid {
text = fmt.Sprintf("[%d] %s", os.Getpid(), text)
}
_ = log.Output(4, text)
}
// LogValueItem describes keyed item for a JSON log entry
@@ -123,45 +108,76 @@ func (j LogValueItem) String() string {
return fmt.Sprint(j.value)
}
// LogLevelToSlog converts an rclone log level to log/slog log level.
func LogLevelToSlog(level LogLevel) slog.Level {
slogLevel := slog.LevelError
// NB level is unsigned so we don't check < 0 here
if int(level) < len(levelToSlog) {
slogLevel = levelToSlog[level]
func logLogrus(level LogLevel, text string, fields logrus.Fields) {
switch level {
case LogLevelDebug:
logrus.WithFields(fields).Debug(text)
case LogLevelInfo:
logrus.WithFields(fields).Info(text)
case LogLevelNotice, LogLevelWarning:
logrus.WithFields(fields).Warn(text)
case LogLevelError:
logrus.WithFields(fields).Error(text)
case LogLevelCritical:
logrus.WithFields(fields).Fatal(text)
case LogLevelEmergency, LogLevelAlert:
logrus.WithFields(fields).Panic(text)
}
return slogLevel
}
func logSlog(level LogLevel, text string, attrs []any) {
slog.Log(context.Background(), LogLevelToSlog(level), text, attrs...)
}
func logSlogWithObject(level LogLevel, o any, text string, attrs []any) {
func logLogrusWithObject(level LogLevel, o any, text string, fields logrus.Fields) {
if o != nil {
attrs = slices.Concat(attrs, []any{
"object", fmt.Sprintf("%+v", o),
"objectType", fmt.Sprintf("%T", o),
})
if fields == nil {
fields = logrus.Fields{}
}
fields["object"] = fmt.Sprintf("%+v", o)
fields["objectType"] = fmt.Sprintf("%T", o)
}
logSlog(level, text, attrs)
logLogrus(level, text, fields)
}
func logJSON(level LogLevel, o any, text string) {
logLogrusWithObject(level, o, text, nil)
}
func logJSONf(level LogLevel, o any, text string, args ...any) {
text = fmt.Sprintf(text, args...)
fields := logrus.Fields{}
for _, arg := range args {
if item, ok := arg.(LogValueItem); ok {
fields[item.key] = item.value
}
}
logLogrusWithObject(level, o, text, fields)
}
func logPlain(level LogLevel, o any, text string) {
if o != nil {
text = fmt.Sprintf("%v: %s", o, text)
}
LogOutput(level, text)
}
func logPlainf(level LogLevel, o any, text string, args ...any) {
logPlain(level, o, fmt.Sprintf(text, args...))
}
// LogPrint produces a log string from the arguments passed in
func LogPrint(level LogLevel, o any, text string) {
logSlogWithObject(level, o, text, nil)
if GetConfig(context.TODO()).UseJSONLog {
logJSON(level, o, text)
} else {
logPlain(level, o, text)
}
}
// LogPrintf produces a log string from the arguments passed in
func LogPrintf(level LogLevel, o any, text string, args ...any) {
text = fmt.Sprintf(text, args...)
var fields []any
for _, arg := range args {
if item, ok := arg.(LogValueItem); ok {
fields = append(fields, item.key, item.value)
}
if GetConfig(context.TODO()).UseJSONLog {
logJSONf(level, o, text, args...)
} else {
logPlainf(level, o, text, args...)
}
logSlogWithObject(level, o, text, fields)
}
// LogLevelPrint writes logs at the given level

106
fs/log/caller_hook.go Normal file
View File

@@ -0,0 +1,106 @@
package log
import (
"fmt"
"runtime"
"strings"
"github.com/rclone/rclone/fs"
"github.com/sirupsen/logrus"
)
// loggerInstalled guards against adding the caller hook to logrus more
// than once if InstallJSONLogger is called repeatedly.
var loggerInstalled = false

// InstallJSONLogger installs the JSON logger at the specified log level.
//
// It registers the caller hook (once), switches logrus to JSON output
// with microsecond timestamps, and maps the rclone log level onto the
// nearest logrus level.
func InstallJSONLogger(logLevel fs.LogLevel) {
	if !loggerInstalled {
		logrus.AddHook(NewCallerHook())
		loggerInstalled = true
	}
	logrus.SetFormatter(&logrus.JSONFormatter{
		TimestampFormat: "2006-01-02T15:04:05.999999-07:00",
	})
	// Default to Debug, then tighten below to match logLevel.
	logrus.SetLevel(logrus.DebugLevel)
	switch logLevel {
	case fs.LogLevelEmergency, fs.LogLevelAlert:
		logrus.SetLevel(logrus.PanicLevel)
	case fs.LogLevelCritical:
		logrus.SetLevel(logrus.FatalLevel)
	case fs.LogLevelError:
		logrus.SetLevel(logrus.ErrorLevel)
	case fs.LogLevelWarning, fs.LogLevelNotice:
		logrus.SetLevel(logrus.WarnLevel)
	case fs.LogLevelInfo:
		logrus.SetLevel(logrus.InfoLevel)
	case fs.LogLevelDebug:
		logrus.SetLevel(logrus.DebugLevel)
	}
}
// Register this package's installer with fs so that fs can enable JSON
// logging without importing this package (avoids a circular dependency).
func init() {
	fs.InstallJSONLogger = InstallJSONLogger
}
// CallerHook is a logrus hook which annotates each log entry with the
// source file and line number of the call site.
type CallerHook struct {
	Field  string         // name of the entry field the caller is stored in (e.g. "source")
	Skip   int            // number of stack frames to skip before searching for the caller
	levels []logrus.Level // levels requested at construction — NOTE(review): not consulted by Levels; verify intent
}
// NewCallerHook makes a caller-annotating hook, applying to all log
// levels when none are given.
func NewCallerHook(levels ...logrus.Level) logrus.Hook {
	// No levels supplied means the hook should cover every level.
	if len(levels) == 0 {
		levels = logrus.AllLevels
	}
	return &CallerHook{
		Field:  "source",
		Skip:   7,
		levels: levels,
	}
}
// Levels returns the log levels this hook fires for.
//
// Previously this ignored the levels passed to NewCallerHook and always
// returned logrus.AllLevels, leaving the levels field dead. Honour the
// configured levels instead; NewCallerHook already defaults the field to
// logrus.AllLevels when none are supplied, so the default behaviour is
// unchanged.
func (h *CallerHook) Levels() []logrus.Level {
	return h.levels
}
// Fire implements logrus.Hook: it stamps the entry with the caller's
// "file:line" under h.Field before the entry is written.
func (h *CallerHook) Fire(entry *logrus.Entry) error {
	entry.Data[h.Field] = findCaller(h.Skip)
	return nil
}
// findCaller ignores the caller relevant to logrus or fslog then find out the exact caller
func findCaller(skip int) string {
file := ""
line := 0
for i := range 10 {
file, line = getCaller(skip + i)
if !strings.HasPrefix(file, "logrus") && !strings.Contains(file, "log.go") {
break
}
}
return fmt.Sprintf("%s:%d", file, line)
}
func getCaller(skip int) (string, int) {
_, file, line, ok := runtime.Caller(skip)
// fmt.Println(file,":",line)
if !ok {
return "", 0
}
n := 0
for i := len(file) - 1; i > 0; i-- {
if file[i] == '/' {
n++
if n >= 2 {
file = file[i+1:]
break
}
}
}
return file, line
}

View File

@@ -1,15 +0,0 @@
// Windows event logging stubs for non windows machines
//go:build !windows
package log
import (
"fmt"
"runtime"
)
// Starts windows event log if configured.
func startWindowsEventLog(*OutputHandler) error {
return fmt.Errorf("windows event log not supported on %s platform", runtime.GOOS)
}

View File

@@ -1,79 +0,0 @@
// Windows event logging
//go:build windows
package log
import (
"fmt"
"log/slog"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc/eventlog"
)
const (
	errorID    = uint32(windows.ERROR_INTERNAL_ERROR) // event ID used for error level records
	infoID     = uint32(windows.ERROR_SUCCESS)        // event ID used for warning/info/debug records
	sourceName = "rclone"                             // event source name registered with Windows
)
var (
	// windowsEventLog is the open event log written to by eventLog.
	// It is set by startWindowsEventLog and closed on exit.
	windowsEventLog *eventlog.Log
)
// startWindowsEventLog opens the Windows event log, registers the
// "rclone" event source and adds a JSON output to handler which writes
// each log record to the event log via eventLog.
//
// It is a no-op when the Windows event log level is OFF.
func startWindowsEventLog(handler *OutputHandler) error {
	// Don't install Windows event log if it is disabled.
	if Opt.WindowsEventLogLevel == fs.LogLevelOff {
		return nil
	}
	// Install the event source - we don't care if this fails as Windows has sensible fallbacks.
	_ = eventlog.InstallAsEventCreate(sourceName, eventlog.Info|eventlog.Warning|eventlog.Error)
	// Open the event log
	// If sourceName didn't get registered then Windows will use "Application" instead which is fine.
	// Though in my tests it seemed to use sourceName regardless.
	elog, err := eventlog.Open(sourceName)
	if err != nil {
		return fmt.Errorf("open event log: %w", err)
	}
	// Set the global for the handler
	windowsEventLog = elog
	// Close it on exit
	atexit.Register(func() {
		err := elog.Close()
		if err != nil {
			fs.Errorf(nil, "Failed to close Windows event log: %v", err)
		}
	})
	// Add additional JSON logging to the eventLog handler.
	handler.AddOutput(true, eventLog)
	fs.Infof(nil, "Logging to Windows event log at level %v", Opt.WindowsEventLogLevel)
	return nil
}
// We use levels ERROR, NOTICE, INFO, DEBUG
// Need to map to ERROR, WARNING, INFO
//
// eventLog writes a single rendered log line to the Windows event log,
// dropping records below the configured --windows-event-log-level.
// Write errors are deliberately ignored - event logging is best effort.
func eventLog(level slog.Level, text string) {
	// Check to see if this level is required
	if level < fs.LogLevelToSlog(Opt.WindowsEventLogLevel) {
		return
	}
	// Now log to windows eventLog
	switch level {
	case fs.SlogLevelEmergency, fs.SlogLevelAlert, fs.SlogLevelCritical, slog.LevelError:
		_ = windowsEventLog.Error(errorID, text)
	case slog.LevelWarn:
		// NOTE(review): warnings use infoID - only errorID/infoID event IDs are defined.
		_ = windowsEventLog.Warning(infoID, text)
	case fs.SlogLevelNotice, slog.LevelInfo, slog.LevelDebug:
		_ = windowsEventLog.Info(infoID, text)
	}
}

View File

@@ -3,14 +3,15 @@ package log
import (
"context"
"fmt"
"io"
"log"
"os"
"reflect"
"runtime"
"strings"
"github.com/rclone/rclone/fs"
"github.com/sirupsen/logrus"
)
// OptionsInfo describes the Options in use
@@ -21,7 +22,7 @@ var OptionsInfo = fs.Options{{
Groups: "Logging",
}, {
Name: "log_format",
Default: logFormatDate | logFormatTime,
Default: "date,time",
Help: "Comma separated list of log format options",
Groups: "Logging",
}, {
@@ -39,27 +40,15 @@ var OptionsInfo = fs.Options{{
Default: false,
Help: "Activate systemd integration for the logger",
Groups: "Logging",
}, {
Name: "windows_event_log_level",
Default: fs.LogLevelOff,
Help: "Windows Event Log level DEBUG|INFO|NOTICE|ERROR|OFF",
Groups: "Logging",
Hide: func() fs.OptionVisibility {
if runtime.GOOS == "windows" {
return 0
}
return fs.OptionHideBoth
}(),
}}
// Options contains options for controlling the logging
type Options struct {
File string `config:"log_file"` // Log everything to this file
Format logFormat `config:"log_format"` // Comma separated list of log format options
UseSyslog bool `config:"syslog"` // Use Syslog for logging
SyslogFacility string `config:"syslog_facility"` // Facility for syslog, e.g. KERN,USER,...
LogSystemdSupport bool `config:"log_systemd"` // set if using systemd logging
WindowsEventLogLevel fs.LogLevel `config:"windows_event_log_level"`
File string `config:"log_file"` // Log everything to this file
Format string `config:"log_format"` // Comma separated list of log format options
UseSyslog bool `config:"syslog"` // Use Syslog for logging
SyslogFacility string `config:"syslog_facility"` // Facility for syslog, e.g. KERN,USER,...
LogSystemdSupport bool `config:"log_systemd"` // set if using systemd logging
}
func init() {
@@ -69,37 +58,6 @@ func init() {
// Opt is the options for the logger
var Opt Options
// enum for the log format
type logFormat = fs.Bits[logFormatChoices]
const (
logFormatDate logFormat = 1 << iota
logFormatTime
logFormatMicroseconds
logFormatUTC
logFormatLongFile
logFormatShortFile
logFormatPid
logFormatNoLevel
logFormatJSON
)
type logFormatChoices struct{}
func (logFormatChoices) Choices() []fs.BitsChoicesInfo {
return []fs.BitsChoicesInfo{
{Bit: uint64(logFormatDate), Name: "date"},
{Bit: uint64(logFormatTime), Name: "time"},
{Bit: uint64(logFormatMicroseconds), Name: "microseconds"},
{Bit: uint64(logFormatUTC), Name: "UTC"},
{Bit: uint64(logFormatLongFile), Name: "longfile"},
{Bit: uint64(logFormatShortFile), Name: "shortfile"},
{Bit: uint64(logFormatPid), Name: "pid"},
{Bit: uint64(logFormatNoLevel), Name: "nolevel"},
{Bit: uint64(logFormatJSON), Name: "json"},
}
}
// fnName returns the name of the calling +2 function
func fnName() string {
pc, _, _, ok := runtime.Caller(2)
@@ -156,29 +114,31 @@ func Stack(o any, info string) {
fs.LogPrintf(fs.LogLevelDebug, o, "%s\nStack trace:\n%s", info, buf)
}
// This is called from fs when the config is reloaded
//
// The config should really be here but we can't move it as it is
// externally visible in the rc.
func logReload(ci *fs.ConfigInfo) error {
Handler.SetLevel(fs.LogLevelToSlog(ci.LogLevel))
if Opt.WindowsEventLogLevel != fs.LogLevelOff && Opt.WindowsEventLogLevel > ci.LogLevel {
return fmt.Errorf("--windows-event-log-level %q must be >= --log-level %q", Opt.WindowsEventLogLevel, ci.LogLevel)
}
return nil
}
func init() {
fs.LogReload = logReload
}
// InitLogging start the logging as per the command line flags
func InitLogging() {
// Note that ci only has the defaults in at this point
// We set real values in logReload
ci := fs.GetConfig(context.Background())
flagsStr := "," + Opt.Format + ","
var flags int
if strings.Contains(flagsStr, ",date,") {
flags |= log.Ldate
}
if strings.Contains(flagsStr, ",time,") {
flags |= log.Ltime
}
if strings.Contains(flagsStr, ",microseconds,") {
flags |= log.Lmicroseconds
}
if strings.Contains(flagsStr, ",UTC,") {
flags |= log.LUTC
}
if strings.Contains(flagsStr, ",longfile,") {
flags |= log.Llongfile
}
if strings.Contains(flagsStr, ",shortfile,") {
flags |= log.Lshortfile
}
log.SetFlags(flags)
fs.LogPrintPid = strings.Contains(flagsStr, ",pid,")
// Log file output
if Opt.File != "" {
@@ -190,27 +150,17 @@ func InitLogging() {
if err != nil {
fs.Errorf(nil, "Failed to seek log file to end: %v", err)
}
log.SetOutput(f)
logrus.SetOutput(f)
redirectStderr(f)
Handler.setWriter(f)
}
// --use-json-log implies JSON formatting
if ci.UseJSONLog {
Opt.Format |= logFormatJSON
}
// Set slog level to initial log level
Handler.SetLevel(fs.LogLevelToSlog(fs.InitialLogLevel()))
// Set the format to the configured format
Handler.setFormat(Opt.Format)
// Syslog output
if Opt.UseSyslog {
if Opt.File != "" {
fs.Fatalf(nil, "Can't use --syslog and --log-file together")
}
startSysLog(Handler)
startSysLog()
}
// Activate systemd logger support if systemd invocation ID is
@@ -223,15 +173,7 @@ func InitLogging() {
// Systemd logging output
if Opt.LogSystemdSupport {
startSystemdLog(Handler)
}
// Windows event logging
if Opt.WindowsEventLogLevel != fs.LogLevelOff {
err := startWindowsEventLog(Handler)
if err != nil {
fs.Fatalf(nil, "Failed to start windows event log: %v", err)
}
startSystemdLog()
}
}

View File

@@ -1,391 +0,0 @@
// Interfaces for the slog package
package log
import (
"bytes"
"context"
"fmt"
"io"
"log/slog"
"os"
"runtime"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
)
// Handler is the standard handler for the logging.
var Handler = defaultHandler()
// Create the default OutputHandler
//
// This logs to stderr with standard go logger format at level INFO.
//
// This will be adjusted by InitLogging to be the configured levels
// but it is important we have a logger running regardless of whether
// InitLogging has been called yet or not.
func defaultHandler() *OutputHandler {
	// Default options for default handler
	var opts = &slog.HandlerOptions{
		Level: fs.LogLevelToSlog(fs.InitialLogLevel()),
	}
	// Create our handler with the classic "date time" prefix
	h := NewOutputHandler(os.Stderr, opts, logFormatDate|logFormatTime)
	// Install it process-wide so plain slog calls route through it
	slog.SetDefault(slog.New(h))
	// Make log.Printf logs at level Notice
	slog.SetLogLoggerLevel(fs.SlogLevelNotice)
	return h
}
// Map slog level names to string
//
// This includes rclone's custom syslog-style levels (NOTICE, CRITICAL,
// ALERT, EMERGENCY) as well as the standard slog levels.
var slogNames = map[slog.Level]string{
	slog.LevelDebug:       "DEBUG",
	slog.LevelInfo:        "INFO",
	fs.SlogLevelNotice:    "NOTICE",
	slog.LevelWarn:        "WARNING",
	slog.LevelError:       "ERROR",
	fs.SlogLevelCritical:  "CRITICAL",
	fs.SlogLevelAlert:     "ALERT",
	fs.SlogLevelEmergency: "EMERGENCY",
}
// slogLevelToString converts a slog level to its rclone name, falling
// back to the standard slog string for levels not in slogNames.
func slogLevelToString(level slog.Level) string {
	if name := slogNames[level]; name != "" {
		return name
	}
	return level.String()
}
// mapLogLevelNames is a slog ReplaceAttr function which rewrites the
// level attribute to rclone's lowercase level name (e.g. "notice").
// All other attributes pass through unchanged.
func mapLogLevelNames(groups []string, a slog.Attr) slog.Attr {
	if a.Key != slog.LevelKey {
		return a
	}
	level, ok := a.Value.Any().(slog.Level)
	if !ok {
		return a
	}
	a.Value = slog.StringValue(strings.ToLower(slogLevelToString(level)))
	return a
}
// getCaller returns "file:line" for the first stack frame above skip
// which is not part of the logging machinery (a file in a /log/
// directory or one whose name ends in log.go). The file is shortened
// to its last two path components. Returns "" if no frame qualifies.
func getCaller(skip int) string {
	var pcs [64]uintptr
	depth := runtime.Callers(skip, pcs[:])
	if depth == 0 {
		return ""
	}
	frames := runtime.CallersFrames(pcs[:depth])
	more := true
	for more {
		var frame runtime.Frame
		frame, more = frames.Next()
		file := frame.File
		// Skip frames belonging to the logging machinery itself.
		if strings.Contains(file, "/log/") || strings.HasSuffix(file, "log.go") {
			continue
		}
		// Shorten the file name to its final two path components.
		slashes := 0
		for i := len(file) - 1; i > 0; i-- {
			if file[i] == '/' {
				slashes++
				if slashes >= 2 {
					file = file[i+1:]
					break
				}
			}
		}
		return fmt.Sprintf("%s:%d", file, frame.Line)
	}
	return ""
}
// OutputHandler is a slog.Handler that writes log records in a format
// identical to the standard library's `log` package (e.g., "YYYY/MM/DD HH:MM:SS message").
//
// It can also write logs in JSON format identical to logrus.
type OutputHandler struct {
	opts        slog.HandlerOptions // handler options; opts.Level is redirected to levelVar
	levelVar    slog.LevelVar       // current level, adjustable at runtime via SetLevel
	writer      io.Writer           // default destination for rendered records
	mu          sync.Mutex          // guards writer and jsonBuf during Handle/jsonLog
	output      []outputFn          // log to writer if empty or the last item
	outputExtra []outputExtra       // log to all these additional places
	format      logFormat           // bitmask of logFormat* flags controlling the header
	jsonBuf     bytes.Buffer        // scratch buffer jsonHandler renders into
	jsonHandler *slog.JSONHandler   // renders JSON records into jsonBuf
}
// Records the type and function pointer for extra logging output.
type outputExtra struct {
	json   bool     // true to deliver the JSON rendering, false for text
	output outputFn // destination the rendered record is sent to
}
// Define the type of the override logger
//
// It receives the record's level and its fully rendered text.
type outputFn func(level slog.Level, text string)
// NewOutputHandler creates a new OutputHandler with the specified flags.
//
// This is designed to use log/slog but produce output which is
// backwards compatible with previous rclone versions.
//
// If opts is nil, default options are used, with Level set to
// slog.LevelInfo.
func NewOutputHandler(out io.Writer, opts *slog.HandlerOptions, format logFormat) *OutputHandler {
	handler := &OutputHandler{
		writer: out,
		format: format,
	}
	if opts != nil {
		handler.opts = *opts
	}
	// Default to INFO when no level was supplied.
	if handler.opts.Level == nil {
		handler.opts.Level = slog.LevelInfo
	}
	// Route all level queries through our own LevelVar so the level
	// can be changed later with SetLevel.
	handler.levelVar.Set(handler.opts.Level.Level())
	handler.opts.Level = &handler.levelVar
	// Prepare the JSON handler up front in case JSON output is needed.
	handler.jsonHandler = slog.NewJSONHandler(&handler.jsonBuf, &slog.HandlerOptions{
		Level:       handler.opts.Level,
		ReplaceAttr: mapLogLevelNames,
	})
	return handler
}
// SetOutput sets a new output handler for the log output.
//
// This is for temporarily overriding the output. Overrides stack: the
// most recently set function receives the logs until ResetOutput is
// called.
func (h *OutputHandler) SetOutput(fn outputFn) {
	h.output = append(h.output, fn)
}
// ResetOutput undoes the most recent SetOutput, restoring the previous
// output. A no-op when no override is active.
func (h *OutputHandler) ResetOutput() {
	if n := len(h.output); n > 0 {
		h.output = h.output[:n-1]
	}
}
// AddOutput adds an additional logging destination of the type specified.
//
// If json is true the destination receives the JSON rendering of each
// record, otherwise it receives the text rendering.
func (h *OutputHandler) AddOutput(json bool, fn outputFn) {
	h.outputExtra = append(h.outputExtra, outputExtra{
		json:   json,
		output: fn,
	})
}
// SetLevel sets a new log level, returning the old one.
//
// Takes effect immediately as opts.Level points at levelVar.
func (h *OutputHandler) SetLevel(level slog.Level) slog.Level {
	oldLevel := h.levelVar.Level()
	h.levelVar.Set(level)
	return oldLevel
}
// Set the writer for the log to that passed.
func (h *OutputHandler) setWriter(writer io.Writer) {
	h.writer = writer
}
// Set the format flags to that passed in, replacing the current set.
func (h *OutputHandler) setFormat(format logFormat) {
	h.format = format
}
// clear format flags that this output type doesn't want
func (h *OutputHandler) clearFormatFlags(bitMask logFormat) {
	h.format &^= bitMask
}
// set format flags that this output type requires
func (h *OutputHandler) setFormatFlags(bitMask logFormat) {
	h.format |= bitMask
}
// Enabled reports whether records at the given level should be logged
// by this handler.
func (h *OutputHandler) Enabled(_ context.Context, level slog.Level) bool {
	threshold := slog.LevelInfo
	if h.opts.Level != nil {
		threshold = h.opts.Level.Level()
	}
	return level >= threshold
}
// Create a log header in Go standard log format.
//
// Header pieces are written in this fixed order, each controlled by
// the format flags: date/time, source file:line, PID, level, object.
func (h *OutputHandler) formatStdLogHeader(buf *bytes.Buffer, level slog.Level, t time.Time, object string, lineInfo string) {
	// Add time in Go standard format if requested
	if h.format&(logFormatDate|logFormatTime|logFormatMicroseconds) != 0 {
		if h.format&logFormatUTC != 0 {
			t = t.UTC()
		}
		if h.format&logFormatDate != 0 {
			year, month, day := t.Date()
			fmt.Fprintf(buf, "%04d/%02d/%02d ", year, month, day)
		}
		// Note that microseconds implies time even without logFormatTime
		if h.format&(logFormatTime|logFormatMicroseconds) != 0 {
			hour, min, sec := t.Clock()
			fmt.Fprintf(buf, "%02d:%02d:%02d", hour, min, sec)
			if h.format&logFormatMicroseconds != 0 {
				fmt.Fprintf(buf, ".%06d", t.Nanosecond()/1e3)
			}
			buf.WriteByte(' ')
		}
	}
	// Add source code filename:line if requested
	if h.format&(logFormatShortFile|logFormatLongFile) != 0 && lineInfo != "" {
		buf.WriteString(lineInfo)
		buf.WriteByte(':')
		buf.WriteByte(' ')
	}
	// Add PID if requested
	if h.format&logFormatPid != 0 {
		fmt.Fprintf(buf, "[%d] ", os.Getpid())
	}
	// Add log level if required - padded so the colon lines up
	if h.format&logFormatNoLevel == 0 {
		levelStr := slogLevelToString(level)
		fmt.Fprintf(buf, "%-6s: ", levelStr)
	}
	// Add object if passed
	if object != "" {
		buf.WriteString(object)
		buf.WriteByte(':')
		buf.WriteByte(' ')
	}
}
// textLog renders r into buf in standard Go log format: header
// (per the format flags) followed by the message and a trailing
// newline.
func (h *OutputHandler) textLog(ctx context.Context, buf *bytes.Buffer, r slog.Record) error {
	lineInfo := ""
	if h.format&(logFormatShortFile|logFormatLongFile) != 0 {
		lineInfo = getCaller(2)
	}
	// Pull out the "object" attribute if present - it becomes part of
	// the header prefix.
	object := ""
	r.Attrs(func(attr slog.Attr) bool {
		if attr.Key != "object" {
			return true
		}
		object = attr.Value.String()
		return false
	})
	h.formatStdLogHeader(buf, r.Level, r.Time, object, lineInfo)
	buf.WriteString(r.Message)
	// Ensure the record ends with a newline
	if b := buf.Bytes(); len(b) == 0 || b[len(b)-1] != '\n' {
		buf.WriteByte('\n')
	}
	return nil
}
// Create a log in JSON format into buf.
//
// The record gains a "source" attribute holding the caller's
// "file:line" before being rendered.
func (h *OutputHandler) jsonLog(ctx context.Context, buf *bytes.Buffer, r slog.Record) (err error) {
	// Call the JSON handler to create the JSON in buf
	r.AddAttrs(
		slog.String("source", getCaller(2)),
	)
	// jsonHandler renders into the shared h.jsonBuf, so hold the lock
	// while rendering and draining it into the caller's buf.
	h.mu.Lock()
	err = h.jsonHandler.Handle(ctx, r)
	if err == nil {
		_, err = h.jsonBuf.WriteTo(buf)
	}
	h.mu.Unlock()
	return err
}
// Handle outputs a log in the current format
//
// It renders the record as text, JSON or both - depending on the
// primary format and what the extra outputs need - then delivers the
// primary rendering to the override output (if set) or the writer,
// and each extra output's preferred rendering to it.
func (h *OutputHandler) Handle(ctx context.Context, r slog.Record) (err error) {
	var (
		bufJSON *bytes.Buffer
		bufText *bytes.Buffer
		buf     *bytes.Buffer
	)
	// Check whether we need to build Text or JSON logs or both
	needJSON := h.format&logFormatJSON != 0
	needText := !needJSON
	for _, out := range h.outputExtra {
		if out.json {
			needJSON = true
		} else {
			needText = true
		}
	}
	// Render each needed form, starting from a small preallocated
	// backing array to keep typical log lines cheap.
	if needJSON {
		var bufJSONBack [256]byte
		bufJSON = bytes.NewBuffer(bufJSONBack[:0])
		err = h.jsonLog(ctx, bufJSON, r)
		if err != nil {
			return err
		}
	}
	if needText {
		var bufTextBack [256]byte
		bufText = bytes.NewBuffer(bufTextBack[:0])
		err = h.textLog(ctx, bufText, r)
		if err != nil {
			return err
		}
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	// Do the log, either to the default destination or to the alternate logging system
	if h.format&logFormatJSON != 0 {
		buf = bufJSON
	} else {
		buf = bufText
	}
	if len(h.output) > 0 {
		// An override set with SetOutput takes precedence over writer
		h.output[len(h.output)-1](r.Level, buf.String())
		err = nil
	} else {
		_, err = h.writer.Write(buf.Bytes())
	}
	// Log to any additional destinations required
	for _, out := range h.outputExtra {
		if out.json {
			out.output(r.Level, bufJSON.String())
		} else {
			out.output(r.Level, bufText.String())
		}
	}
	return err
}
// WithAttrs creates a new handler with the same writer, options, and flags.
// Attributes are ignored for the output format of this specific handler.
func (h *OutputHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return NewOutputHandler(h.writer, &h.opts, h.format)
}
// WithGroup creates a new handler with the same writer, options, and flags.
// Groups are ignored for the output format of this specific handler.
func (h *OutputHandler) WithGroup(name string) slog.Handler {
	return NewOutputHandler(h.writer, &h.opts, h.format)
}
// Check interface is satisfied at compile time
var _ slog.Handler = (*OutputHandler)(nil)

View File

@@ -1,264 +0,0 @@
package log
import (
"bytes"
"context"
"fmt"
"os"
"regexp"
"strings"
"testing"
"time"
"log/slog"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
	// A fixed zone and timestamp used throughout the tests so the
	// formatted output is deterministic.
	utcPlusOne = time.FixedZone("UTC+1", 1*60*60)
	t0         = time.Date(2020, 1, 2, 3, 4, 5, 123456000, utcPlusOne)
)
// Test slogLevelToString covers all mapped levels and an unknown level.
func TestSlogLevelToString(t *testing.T) {
	tests := []struct {
		level slog.Level
		want  string
	}{
		{slog.LevelDebug, "DEBUG"},
		{slog.LevelInfo, "INFO"},
		{fs.SlogLevelNotice, "NOTICE"},
		{slog.LevelWarn, "WARNING"},
		{slog.LevelError, "ERROR"},
		{fs.SlogLevelCritical, "CRITICAL"},
		{fs.SlogLevelAlert, "ALERT"},
		{fs.SlogLevelEmergency, "EMERGENCY"},
		// Unknown level should fall back to .String()
		{slog.Level(1234), slog.Level(1234).String()},
	}
	for _, tc := range tests {
		got := slogLevelToString(tc.level)
		assert.Equal(t, tc.want, got)
	}
}
// Test mapLogLevelNames replaces only the LevelKey attr and lowercases it.
func TestMapLogLevelNames(t *testing.T) {
	a := slog.Any(slog.LevelKey, slog.LevelWarn)
	mapped := mapLogLevelNames(nil, a)
	val, ok := mapped.Value.Any().(string)
	if !ok || val != "warning" {
		t.Errorf("mapLogLevelNames did not lowercase level: got %v", mapped.Value.Any())
	}
	// non-level attr should remain unchanged
	other := slog.String("foo", "bar")
	out := mapLogLevelNames(nil, other)
	assert.Equal(t, out.Value, other.Value, "mapLogLevelNames changed a non-level attr")
}
// Test getCaller returns a file:line string of the correct form.
func TestGetCaller(t *testing.T) {
	out := getCaller(0)
	assert.NotEqual(t, "", out)
	// Expect exactly "file:line" - one colon separating path and digits
	match := regexp.MustCompile(`^([^:]+):(\d+)$`).FindStringSubmatch(out)
	assert.NotNil(t, match)
	// Can't test this as it skips the /log/ directory!
	// assert.Equal(t, "slog_test.go", match[1])
}
// Test formatStdLogHeader for various flag combinations.
//
// Only the prefix of the generated header is checked, so trailing
// header parts (level, object) don't need to be spelt out per case.
func TestFormatStdLogHeader(t *testing.T) {
	cases := []struct {
		name       string
		format     logFormat
		lineInfo   string
		object     string
		wantPrefix string
	}{
		{"dateTime", logFormatDate | logFormatTime, "", "", "2020/01/02 03:04:05 "},
		{"time", logFormatTime, "", "", "03:04:05 "},
		{"date", logFormatDate, "", "", "2020/01/02 "},
		{"dateTimeUTC", logFormatDate | logFormatTime | logFormatUTC, "", "", "2020/01/02 02:04:05 "},
		{"dateTimeMicro", logFormatDate | logFormatTime | logFormatMicroseconds, "", "", "2020/01/02 03:04:05.123456 "},
		{"micro", logFormatMicroseconds, "", "", "03:04:05.123456 "},
		// NOTE(review): the object value "03:04:05 " in the next two cases
		// looks like a stray time string - harmless as only the prefix
		// (which ends before the object) is asserted, but worth confirming.
		{"shortFile", logFormatShortFile, "foo.go:10", "03:04:05 ", "foo.go:10: "},
		{"longFile", logFormatLongFile, "foo.go:10", "03:04:05 ", "foo.go:10: "},
		{"timePID", logFormatPid, "", "", fmt.Sprintf("[%d] ", os.Getpid())},
		{"levelObject", 0, "", "myobj", "INFO : myobj: "},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			h := &OutputHandler{format: tc.format}
			buf := &bytes.Buffer{}
			h.formatStdLogHeader(buf, slog.LevelInfo, t0, tc.object, tc.lineInfo)
			if !strings.HasPrefix(buf.String(), tc.wantPrefix) {
				t.Errorf("%s: got %q; want prefix %q", tc.name, buf.String(), tc.wantPrefix)
			}
		})
	}
}
// Test Enabled honors the HandlerOptions.Level.
func TestEnabled(t *testing.T) {
	// Default level is INFO so DEBUG should be rejected
	h := NewOutputHandler(&bytes.Buffer{}, nil, 0)
	assert.True(t, h.Enabled(context.Background(), slog.LevelInfo))
	assert.False(t, h.Enabled(context.Background(), slog.LevelDebug))
	opts := &slog.HandlerOptions{Level: slog.LevelDebug}
	h2 := NewOutputHandler(&bytes.Buffer{}, opts, 0)
	assert.True(t, h2.Enabled(context.Background(), slog.LevelDebug))
}
// Test clearFormatFlags and setFormatFlags bitwise ops.
func TestClearSetFormatFlags(t *testing.T) {
	h := &OutputHandler{format: logFormatDate | logFormatTime}
	h.clearFormatFlags(logFormatTime)
	assert.True(t, h.format&logFormatTime == 0)
	h.setFormatFlags(logFormatMicroseconds)
	assert.True(t, h.format&logFormatMicroseconds != 0)
}
// Test SetOutput and ResetOutput override the default writer.
func TestSetResetOutput(t *testing.T) {
	buf := &bytes.Buffer{}
	h := NewOutputHandler(buf, nil, 0)
	var gotOverride string
	out := func(_ slog.Level, txt string) {
		gotOverride = txt
	}
	h.SetOutput(out)
	r := slog.NewRecord(t0, slog.LevelInfo, "hello", 0)
	// While the override is set the writer must stay untouched
	require.NoError(t, h.Handle(context.Background(), r))
	assert.NotEqual(t, "", gotOverride)
	require.Equal(t, "", buf.String())
	// After reset, logs go to the writer again
	h.ResetOutput()
	require.NoError(t, h.Handle(context.Background(), r))
	require.NotEqual(t, "", buf.String())
}
// Test AddOutput sends to extra destinations.
func TestAddOutput(t *testing.T) {
	buf := &bytes.Buffer{}
	h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime)
	var extraText string
	out := func(_ slog.Level, txt string) {
		extraText = txt
	}
	h.AddOutput(false, out)
	r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
	require.NoError(t, h.Handle(context.Background(), r))
	// Both the writer and the extra output get the same text rendering
	assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", buf.String())
	assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText)
}
// Test AddOutputJSON sends JSON to extra destinations.
func TestAddOutputJSON(t *testing.T) {
	buf := &bytes.Buffer{}
	h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime)
	var extraText string
	out := func(_ slog.Level, txt string) {
		extraText = txt
	}
	h.AddOutput(true, out)
	r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
	require.NoError(t, h.Handle(context.Background(), r))
	assert.NotEqual(t, "", extraText)
	// Writer gets text, the JSON extra output gets the JSON rendering
	assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", buf.String())
	assert.True(t, strings.HasPrefix(extraText, `{"time":"2020-01-02T03:04:05.123456+01:00","level":"info","msg":"world","source":"`))
	assert.True(t, strings.HasSuffix(extraText, "\"}\n"))
}
// Test AddOutputUseJSONLog sends text to extra destinations.
func TestAddOutputUseJSONLog(t *testing.T) {
	buf := &bytes.Buffer{}
	h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime|logFormatJSON)
	var extraText string
	out := func(_ slog.Level, txt string) {
		extraText = txt
	}
	h.AddOutput(false, out)
	r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
	require.NoError(t, h.Handle(context.Background(), r))
	assert.NotEqual(t, "", extraText)
	// Writer gets JSON (primary format), the extra output gets text
	assert.True(t, strings.HasPrefix(buf.String(), `{"time":"2020-01-02T03:04:05.123456+01:00","level":"info","msg":"world","source":"`))
	assert.True(t, strings.HasSuffix(buf.String(), "\"}\n"))
	assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText)
}
// Test WithAttrs and WithGroup return new handlers with same settings.
func TestWithAttrsAndGroup(t *testing.T) {
	buf := &bytes.Buffer{}
	h := NewOutputHandler(buf, nil, logFormatDate)
	h2 := h.WithAttrs([]slog.Attr{slog.String("k", "v")})
	if _, ok := h2.(*OutputHandler); !ok {
		t.Error("WithAttrs returned wrong type")
	}
	h3 := h.WithGroup("grp")
	if _, ok := h3.(*OutputHandler); !ok {
		t.Error("WithGroup returned wrong type")
	}
}
// Test textLog and jsonLog directly for basic correctness.
func TestTextLogAndJsonLog(t *testing.T) {
	h := NewOutputHandler(&bytes.Buffer{}, nil, logFormatDate|logFormatTime)
	r := slog.NewRecord(t0, slog.LevelWarn, "msg!", 0)
	// The "object" attr should appear as a prefix in the text rendering
	r.AddAttrs(slog.String("object", "obj"))
	// textLog
	bufText := &bytes.Buffer{}
	require.NoError(t, h.textLog(context.Background(), bufText, r))
	out := bufText.String()
	if !strings.Contains(out, "WARNING") || !strings.Contains(out, "obj:") || !strings.HasSuffix(out, "\n") {
		t.Errorf("textLog output = %q", out)
	}
	// jsonLog
	bufJSON := &bytes.Buffer{}
	require.NoError(t, h.jsonLog(context.Background(), bufJSON, r))
	j := bufJSON.String()
	if !strings.Contains(j, `"level":"warning"`) || !strings.Contains(j, `"msg":"msg!"`) {
		t.Errorf("jsonLog output = %q", j)
	}
}
// Table-driven test for JSON vs text Handle behavior.
func TestHandleFormatFlags(t *testing.T) {
	r := slog.NewRecord(t0, slog.LevelInfo, "hi", 0)
	cases := []struct {
		name     string
		format   logFormat
		wantJSON bool
	}{
		{"textMode", 0, false},
		{"jsonMode", logFormatJSON, true},
	}
	for _, tc := range cases {
		buf := &bytes.Buffer{}
		h := NewOutputHandler(buf, nil, tc.format)
		require.NoError(t, h.Handle(context.Background(), r))
		out := buf.String()
		if tc.wantJSON {
			if !strings.HasPrefix(out, "{") || !strings.Contains(out, `"level":"info"`) {
				t.Errorf("%s: got %q; want JSON", tc.name, out)
			}
		} else {
			if !strings.Contains(out, "INFO") {
				t.Errorf("%s: got %q; want text INFO", tc.name, out)
			}
		}
	}
}

View File

@@ -11,7 +11,7 @@ import (
)
// Starts syslog if configured, returns true if it was started
func startSysLog(handler *OutputHandler) bool {
func startSysLog() bool {
fs.Fatalf(nil, "--syslog not supported on %s platform", runtime.GOOS)
return false
}

View File

@@ -5,7 +5,7 @@
package log
import (
"log/slog"
"log"
"log/syslog"
"os"
"path"
@@ -39,7 +39,7 @@ var (
)
// Starts syslog
func startSysLog(handler *OutputHandler) bool {
func startSysLog() bool {
facility, ok := syslogFacilityMap[Opt.SyslogFacility]
if !ok {
fs.Fatalf(nil, "Unknown syslog facility %q - man syslog for list", Opt.SyslogFacility)
@@ -49,27 +49,27 @@ func startSysLog(handler *OutputHandler) bool {
if err != nil {
fs.Fatalf(nil, "Failed to start syslog: %v", err)
}
handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid)
handler.setFormatFlags(logFormatNoLevel)
handler.SetOutput(func(level slog.Level, text string) {
log.SetFlags(0)
log.SetOutput(w)
fs.LogOutput = func(level fs.LogLevel, text string) {
switch level {
case fs.SlogLevelEmergency:
case fs.LogLevelEmergency:
_ = w.Emerg(text)
case fs.SlogLevelAlert:
case fs.LogLevelAlert:
_ = w.Alert(text)
case fs.SlogLevelCritical:
case fs.LogLevelCritical:
_ = w.Crit(text)
case slog.LevelError:
case fs.LogLevelError:
_ = w.Err(text)
case slog.LevelWarn:
case fs.LogLevelWarning:
_ = w.Warning(text)
case fs.SlogLevelNotice:
case fs.LogLevelNotice:
_ = w.Notice(text)
case slog.LevelInfo:
case fs.LogLevelInfo:
_ = w.Info(text)
case slog.LevelDebug:
case fs.LogLevelDebug:
_ = w.Debug(text)
}
})
}
return true
}

View File

@@ -11,7 +11,7 @@ import (
)
// Enables systemd logs if configured or if auto-detected
func startSystemdLog(handler *OutputHandler) bool {
func startSystemdLog() bool {
fs.Fatalf(nil, "--log-systemd not supported on %s platform", runtime.GOOS)
return false
}

View File

@@ -7,47 +7,54 @@ package log
import (
"fmt"
"log"
"log/slog"
"strconv"
"strings"
"github.com/coreos/go-systemd/v22/journal"
"github.com/rclone/rclone/fs"
)
// Enables systemd logs if configured or if auto-detected
func startSystemdLog(handler *OutputHandler) bool {
handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid)
handler.setFormatFlags(logFormatNoLevel)
func startSystemdLog() bool {
flagsStr := "," + Opt.Format + ","
var flags int
if strings.Contains(flagsStr, ",longfile,") {
flags |= log.Llongfile
}
if strings.Contains(flagsStr, ",shortfile,") {
flags |= log.Lshortfile
}
log.SetFlags(flags)
// TODO: Use the native journal.Print approach rather than a custom implementation
handler.SetOutput(func(level slog.Level, text string) {
fs.LogOutput = func(level fs.LogLevel, text string) {
text = fmt.Sprintf("<%s>%-6s: %s", systemdLogPrefix(level), level, text)
_ = log.Output(4, text)
})
}
return true
}
var slogLevelToSystemdPrefix = map[slog.Level]journal.Priority{
fs.SlogLevelEmergency: journal.PriEmerg,
fs.SlogLevelAlert: journal.PriAlert,
fs.SlogLevelCritical: journal.PriCrit,
slog.LevelError: journal.PriErr,
slog.LevelWarn: journal.PriWarning,
fs.SlogLevelNotice: journal.PriNotice,
slog.LevelInfo: journal.PriInfo,
slog.LevelDebug: journal.PriDebug,
var logLevelToSystemdPrefix = []journal.Priority{
fs.LogLevelEmergency: journal.PriEmerg,
fs.LogLevelAlert: journal.PriAlert,
fs.LogLevelCritical: journal.PriCrit,
fs.LogLevelError: journal.PriErr,
fs.LogLevelWarning: journal.PriWarning,
fs.LogLevelNotice: journal.PriNotice,
fs.LogLevelInfo: journal.PriInfo,
fs.LogLevelDebug: journal.PriDebug,
}
func systemdLogPrefix(l slog.Level) string {
prio, ok := slogLevelToSystemdPrefix[l]
if !ok {
func systemdLogPrefix(l fs.LogLevel) string {
if l >= fs.LogLevel(len(logLevelToSystemdPrefix)) {
return ""
}
return strconv.Itoa(int(prio))
return strconv.Itoa(int(logLevelToSystemdPrefix[l]))
}
func isJournalStream() bool {
if usingJournald, _ := journal.StderrIsJournalStream(); usingJournald {
return true
}
return false
}

View File

@@ -6,11 +6,12 @@ import (
"errors"
"fmt"
"io"
"log"
"os"
"sort"
"strings"
"testing"
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
@@ -64,16 +65,18 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
check := func(i int, wantErrors int64, wantChecks int64, oneway bool, wantOutput map[string]string) {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
accounting.GlobalStats().ResetCounters()
var buf bytes.Buffer
log.SetOutput(&buf)
defer func() {
log.SetOutput(os.Stderr)
}()
opt := operations.CheckOpt{
Fdst: r.Fremote,
Fsrc: r.Flocal,
OneWay: oneway,
}
addBuffers(&opt)
var err error
buf := bilib.CaptureOutput(func() {
err = checkFunction(ctx, &opt)
})
err := checkFunction(ctx, &opt)
gotErrors := accounting.GlobalStats().GetErrors()
gotChecks := accounting.GlobalStats().GetChecks()
if wantErrors == 0 && err != nil {
@@ -85,7 +88,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
if wantErrors != gotErrors {
t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
}
if gotChecks > 0 && !strings.Contains(string(buf), "matching files") {
if gotChecks > 0 && !strings.Contains(buf.String(), "matching files") {
t.Errorf("%d: Total files matching line missing", i)
}
if wantChecks != gotChecks {
@@ -386,6 +389,9 @@ func testCheckSum(t *testing.T, download bool) {
checkRun := func(runNo, wantChecks, wantErrors int, want wantType) {
accounting.GlobalStats().ResetCounters()
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer log.SetOutput(os.Stderr)
opt := operations.CheckOpt{
Combined: new(bytes.Buffer),
@@ -395,10 +401,8 @@ func testCheckSum(t *testing.T, download bool) {
MissingOnSrc: new(bytes.Buffer),
MissingOnDst: new(bytes.Buffer),
}
var err error
buf := bilib.CaptureOutput(func() {
err = operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download)
})
err := operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download)
gotErrors := int(accounting.GlobalStats().GetErrors())
if wantErrors == 0 {
assert.NoError(t, err, "unexpected error in run %d", runNo)
@@ -410,7 +414,7 @@ func testCheckSum(t *testing.T, download bool) {
gotChecks := int(accounting.GlobalStats().GetChecks())
if wantChecks > 0 || gotChecks > 0 {
assert.Contains(t, string(buf), "matching files", "missing matching files in run %d", runNo)
assert.Contains(t, buf.String(), "matching files", "missing matching files in run %d", runNo)
}
assert.Equal(t, wantChecks, gotChecks, "wrong number of checks in run %d", runNo)

4
go.mod
View File

@@ -66,6 +66,7 @@ require (
github.com/rivo/uniseg v0.4.7
github.com/rogpeppe/go-internal v1.14.1
github.com/shirou/gopsutil/v4 v4.25.1
github.com/sirupsen/logrus v1.9.3
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
@@ -137,7 +138,7 @@ require (
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.8.3 // indirect
github.com/ebitengine/purego v0.8.2 // indirect
github.com/emersion/go-message v0.18.0 // indirect
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
@@ -207,7 +208,6 @@ require (
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
github.com/samber/lo v1.47.0 // indirect
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sony/gobreaker v0.5.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect
github.com/tklauser/go-sysconf v0.3.13 // indirect

4
go.sum
View File

@@ -219,8 +219,8 @@ github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq4
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/ebitengine/purego v0.8.3 h1:K+0AjQp63JEZTEMZiwsI9g0+hAMNohwUOtY0RPGexmc=
github.com/ebitengine/purego v0.8.3/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emersion/go-message v0.18.0 h1:7LxAXHRpSeoO/Wom3ZApVZYG7c3d17yCScYce8WiXA8=
github.com/emersion/go-message v0.18.0/go.mod h1:Zi69ACvzaoV/MBnrxfVBPV3xWEuCmC2nEN39oJF4B8A=
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 h1:IbFBtwoTQyw0fIM5xv1HF+Y+3ZijDR839WMulgxCcUY=