Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-max-me...fix-8233-g (32 commits)
Commits in this compare (32):

b7c4d1f8f6, 15510c66d4, dfa4d94827, 36b89960e3, a3f3fc61ee, b8fde4fc46, c37fe733df, b31659904f, ebcf51336e, a334bba643, d4fd93e7f3, 6644bdba0f, 68a65e878f, 7606ad8294, 32847e88b4, 2e879586bd, 9d55b2411f, fe880c0fac, b160089be7, c2254164f8, e57b94c4ac, 3273bf3716, f5501edfcf, 2404831725, 9f0e237931, f752eaa298, 1f8373fae8, b94f80b9d7, 5f4e983ccb, 28b6f38135, 6adb4056bb, 0b9671313b
@@ -572,3 +572,19 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

 [Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

+[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
+
+## Keeping a backend or command out of tree
+
+Rclone was designed to be modular so it is very easy to keep a backend
+or a command out of the main rclone source tree.
+
+So for example if you had a backend which accessed your proprietary
+systems or a command which was specialised for your needs you could
+add them out of tree.
+
+This may be easier than using a plugin and is supported on all
+platforms not just macOS and Linux.
+
+This is explained further in https://github.com/rclone/rclone_out_of_tree_example
+which has an example of an out of tree backend `ram` (which is a
+renamed version of the `memory` backend).
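
A minimal sketch of what such an out of tree build looks like, modelled on rclone's own `rclone.go`; the `example.com/you/ram` import path is hypothetical and stands for wherever your backend lives:

```go
package main

import (
	_ "github.com/rclone/rclone/backend/all" // import the built-in backends
	"github.com/rclone/rclone/cmd"
	_ "github.com/rclone/rclone/cmd/all" // import the built-in commands

	// hypothetical out of tree backend; its init() registers it with rclone
	_ "example.com/you/ram"
)

func main() {
	cmd.Main()
}
```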
README.md (18 lines changed)
@@ -1,20 +1,4 @@
-<div align="center">
-    <sup>Special thanks to our sponsor:</sup>
-    <br>
-    <br>
-    <a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
-        <div>
-            <img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
-        </div>
-        <b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
-        <div>
-            <sup>Visit warp.dev to learn more.</sup>
-        </div>
-    </a>
-    <br>
-    <hr>
-</div>
-<br>
-
 [<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
 [<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
@@ -1745,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
 	}
 	var updateMetadata updateMetadataFn
 	if len(metadata) > 0 {
-		updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
+		updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
 		if err != nil {
 			return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
 		}
@@ -1776,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
 	}
 	dirID = actualID(dirID)
 	updateInfo := &drive.File{}
-	updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
+	updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
 	if err != nil {
 		return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
 	}
@@ -507,7 +507,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
 //
 // It returns a callback which should be called to finish the updates
 // after the data is uploaded.
-func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
+func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
 	callbackFns := []updateMetadataFn{}
 	callback = func(ctx context.Context, info *drive.File) error {
 		for _, fn := range callbackFns {
@@ -532,7 +532,9 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 		}
 		switch k {
 		case "copy-requires-writer-permission":
-			if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
+			if isFolder {
+				fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
+			} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
 				return nil, err
 			}
 		case "writers-can-share":
@@ -629,7 +631,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
 	if err != nil {
 		return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
 	}
-	callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
+	callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
 	if err != nil {
 		return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
 	}
@@ -43,6 +43,7 @@ var (
 	errAlbumDelete = errors.New("google photos API does not implement deleting albums")
 	errRemove      = errors.New("google photos API only implements removing files from albums")
 	errOwnAlbums   = errors.New("google photos API only allows uploading to albums rclone created")
+	errReadOnly    = errors.New("can't upload files in read only mode")
 )

 const (
@@ -52,19 +53,31 @@ const (
 	listChunks  = 100 // chunk size to read directory listings
 	albumChunks = 50  // chunk size to read album listings
 	minSleep    = 10 * time.Millisecond
-	scopeReadOnly  = "https://www.googleapis.com/auth/photoslibrary.readonly"
-	scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
-	scopeAccess    = 2 // position of access scope in list
+	scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
+	scopeReadOnly   = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
+	scopeReadWrite  = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
 )

 var (
+	// scopes needed for read write access
+	scopesReadWrite = []string{
+		"openid",
+		"profile",
+		scopeAppendOnly,
+		scopeReadOnly,
+		scopeReadWrite,
+	}
+
+	// scopes needed for read only access
+	scopesReadOnly = []string{
+		"openid",
+		"profile",
+		scopeReadOnly,
+	}
+
 	// Description of how to auth for this app
 	oauthConfig = &oauthutil.Config{
-		Scopes: []string{
-			"openid",
-			"profile",
-			scopeReadWrite, // this must be at position scopeAccess
-		},
+		Scopes:   scopesReadWrite,
 		AuthURL:  google.Endpoint.AuthURL,
 		TokenURL: google.Endpoint.TokenURL,
 		ClientID: rcloneClientID,
@@ -100,9 +113,9 @@ func init() {
 		case "":
 			// Fill in the scopes
 			if opt.ReadOnly {
-				oauthConfig.Scopes[scopeAccess] = scopeReadOnly
+				oauthConfig.Scopes = scopesReadOnly
 			} else {
-				oauthConfig.Scopes[scopeAccess] = scopeReadWrite
+				oauthConfig.Scopes = scopesReadWrite
 			}
 			return oauthutil.ConfigOut("warning", &oauthutil.Options{
 				OAuth2Config: oauthConfig,
@@ -1120,6 +1133,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}

 	if !album.IsWriteable {
+		if o.fs.opt.ReadOnly {
+			return errReadOnly
+		}
 		return errOwnAlbums
 	}
@@ -1177,7 +1193,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("failed to commit batch: %w", err)
 	}

-	o.setMetaData(info)
+	// Store the info back into the Object
+	// info == nil will only happen if we are uploading async batches
+	// we don't have anything sensible to write into the info then.
+	if info != nil {
+		o.setMetaData(info)
+	}

 	// Add upload to internal storage
 	if pattern.isUpload {
@@ -252,18 +252,14 @@ func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.Op
 	}

 	resp, err := d.icloud.srv.Call(ctx, opts)
-	if err != nil {
-		// icloud has some weird http codes
-		if resp.StatusCode == 330 {
-			loc, err := resp.Location()
-			if err == nil {
-				return d.DownloadFile(ctx, loc.String(), opt)
-			}
-		}
-
-		return resp, err
-	}
-	return d.icloud.srv.Call(ctx, opts)
+	// icloud has some weird http codes
+	if err != nil && resp != nil && resp.StatusCode == 330 {
+		loc, err := resp.Location()
+		if err == nil {
+			return d.DownloadFile(ctx, loc.String(), opt)
+		}
+	}
+	return resp, err
 }

 // MoveItemToTrashByItemID moves an item to the trash based on the item ID.
@@ -56,6 +56,7 @@ const (
 	driveTypeSharepoint = "documentLibrary"
 	defaultChunkSize    = 10 * fs.Mebi
 	chunkSizeMultiple   = 320 * fs.Kibi
+	maxSinglePartSize   = 4 * fs.Mebi

 	regionGlobal = "global"
 	regionUS     = "us"
@@ -138,6 +139,21 @@ func init() {
 			Help: "Azure and Office 365 operated by Vnet Group in China",
 		},
 	},
 }, {
+	Name: "upload_cutoff",
+	Help: `Cutoff for switching to chunked upload.
+
+Any files larger than this will be uploaded in chunks of chunk_size.
+
+This is disabled by default as uploading using single part uploads
+causes rclone to use twice the storage on Onedrive business as when
+rclone sets the modification time after the upload Onedrive creates a
+new version.
+
+See: https://github.com/rclone/rclone/issues/1716
+`,
+	Default:  fs.SizeSuffix(-1),
+	Advanced: true,
+}, {
 	Name: "chunk_size",
 	Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
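
For example, to opt in to single part uploads for files up to the 4 MiB ceiling enforced by `checkUploadCutoff` below (the flag name follows rclone's usual `--onedrive-` prefixing of the `upload_cutoff` option; paths are placeholders):

```
rclone copy /local/data onedrive:backup --onedrive-upload-cutoff 4M
```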
@@ -746,6 +762,7 @@ Examples:
 // Options defines the configuration for this backend
 type Options struct {
 	Region       string        `config:"region"`
+	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
 	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
 	DriveID      string        `config:"drive_id"`
 	DriveType    string        `config:"drive_type"`
@@ -1022,6 +1039,13 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 	return
 }

+func checkUploadCutoff(cs fs.SizeSuffix) error {
+	if cs > maxSinglePartSize {
+		return fmt.Errorf("%v is greater than %v", cs, maxSinglePartSize)
+	}
+	return nil
+}
+
 // NewFs constructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
@@ -1035,6 +1059,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err != nil {
 		return nil, fmt.Errorf("onedrive: chunk size: %w", err)
 	}
+	err = checkUploadCutoff(opt.UploadCutoff)
+	if err != nil {
+		return nil, fmt.Errorf("onedrive: upload cutoff: %w", err)
+	}

 	if opt.DriveID == "" || opt.DriveType == "" {
 		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -2469,6 +2497,10 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
 			return false, nil
 		}
 		return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
+	} else if err != nil && resp != nil && resp.StatusCode == http.StatusNotFound {
+		fs.Debugf(o, "Received 404 error: assuming eventual consistency problem with session - retrying chunk: %v", err)
+		time.Sleep(5 * time.Second) // a little delay to help things along
+		return true, err
 	}
 	if err != nil {
 		return shouldRetry(ctx, resp, err)
@@ -2563,8 +2595,8 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
 // This function will set modtime and metadata after uploading, which will create a new version for the remote file
 func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
 	size := src.Size()
-	if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
-		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
+	if size < 0 || size > int64(maxSinglePartSize) {
+		return nil, fmt.Errorf("size passed into uploadSinglepart must be >= 0 and <= %v", maxSinglePartSize)
 	}

 	fs.Debugf(o, "Starting singlepart upload")
@@ -2617,9 +2649,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	size := src.Size()

 	var info *api.Item
-	if size > 0 {
+	if size > 0 && size >= int64(o.fs.opt.UploadCutoff) {
 		info, err = o.uploadMultipart(ctx, in, src, options...)
-	} else if size == 0 {
+	} else if size >= 0 {
 		info, err = o.uploadSinglepart(ctx, in, src, options...)
 	} else {
 		return errors.New("unknown-sized upload not supported")
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"io"
 	iofs "io/fs"
+	"net/url"
 	"os"
 	"path"
 	"regexp"
@@ -482,6 +483,14 @@ Example:

     myUser:myPass@localhost:9005
 `,
 	Advanced: true,
 }, {
+	Name:    "http_proxy",
+	Default: "",
+	Help: `URL for HTTP CONNECT proxy
+
+Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
+`,
+	Advanced: true,
+}, {
 	Name:    "copy_is_hardlink",
 	Default: false,
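
A hypothetical config stanza using the new option (host names are placeholders; the `http_proxy` key is the one registered above):

```
[myserver]
type = sftp
host = sftp.example.com
user = alice
http_proxy = http://proxy.example.com:3128
```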
@@ -545,6 +554,7 @@ type Options struct {
 	HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
 	SSH               fs.SpaceSepList `config:"ssh"`
 	SocksProxy        string          `config:"socks_proxy"`
+	HTTPProxy         string          `config:"http_proxy"`
 	CopyIsHardlink    bool            `config:"copy_is_hardlink"`
 }
@@ -570,6 +580,7 @@ type Fs struct {
 	savedpswd string
 	sessions  atomic.Int32 // count in use sessions
 	tokens    *pacer.TokenDispenser
+	proxyURL  *url.URL // address of HTTP proxy read from environment
 }

 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -867,6 +878,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt.Port = "22"
 	}

+	// get proxy URL if set
+	if opt.HTTPProxy != "" {
+		proxyURL, err := url.Parse(opt.HTTPProxy)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
+		}
+		f.proxyURL = proxyURL
+	}
+
 	sshConfig := &ssh.ClientConfig{
 		User: opt.User,
 		Auth: []ssh.AuthMethod{},
@@ -31,6 +31,8 @@ func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, ssh
 	)
 	if f.opt.SocksProxy != "" {
 		conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
+	} else if f.proxyURL != nil {
+		conn, err = proxy.HTTPConnectDial(network, addr, f.proxyURL, baseDialer)
 	} else {
 		conn, err = baseDialer.Dial(network, addr)
 	}
backend/webdav/tus-errors.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package webdav

import (
	"errors"
	"fmt"
)

var (
	// ErrChunkSize is returned when the chunk size is zero
	ErrChunkSize = errors.New("tus chunk size must be greater than zero")
	// ErrNilLogger is returned when the logger is nil
	ErrNilLogger = errors.New("tus logger can't be nil")
	// ErrNilStore is returned when the store is nil
	ErrNilStore = errors.New("tus store can't be nil if resume is enable")
	// ErrNilUpload is returned when the upload is nil
	ErrNilUpload = errors.New("tus upload can't be nil")
	// ErrLargeUpload is returned when the upload body is to large
	ErrLargeUpload = errors.New("tus upload body is to large")
	// ErrVersionMismatch is returned when the tus protocol version is mismatching
	ErrVersionMismatch = errors.New("tus protocol version mismatch")
	// ErrOffsetMismatch is returned when the tus upload offset is mismatching
	ErrOffsetMismatch = errors.New("tus upload offset mismatch")
	// ErrUploadNotFound is returned when the tus upload is not found
	ErrUploadNotFound = errors.New("tus upload not found")
	// ErrResumeNotEnabled is returned when the tus resuming is not enabled
	ErrResumeNotEnabled = errors.New("tus resuming not enabled")
	// ErrFingerprintNotSet is returned when the tus fingerprint is not set
	ErrFingerprintNotSet = errors.New("tus fingerprint not set")
)

// ClientError represents an error state of a client
type ClientError struct {
	Code int
	Body []byte
}

// Error returns an error string containing the client error code
func (c ClientError) Error() string {
	return fmt.Sprintf("unexpected status code: %d", c.Code)
}
backend/webdav/tus-upload.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package webdav

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

// Metadata is a typedef for a string to string map to hold metadata
type Metadata map[string]string

// Upload is a struct containing the file status during upload
type Upload struct {
	stream io.ReadSeeker
	size   int64
	offset int64

	Fingerprint string
	Metadata    Metadata
}

// Updates the Upload information based on offset.
func (u *Upload) updateProgress(offset int64) {
	u.offset = offset
}

// Finished returns whether this upload is finished or not.
func (u *Upload) Finished() bool {
	return u.offset >= u.size
}

// Progress returns the progress in a percentage.
func (u *Upload) Progress() int64 {
	return (u.offset * 100) / u.size
}

// Offset returns the current upload offset.
func (u *Upload) Offset() int64 {
	return u.offset
}

// Size returns the size of the upload body.
func (u *Upload) Size() int64 {
	return u.size
}

// EncodedMetadata encodes the upload metadata.
func (u *Upload) EncodedMetadata() string {
	var encoded []string

	for k, v := range u.Metadata {
		encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(v)))
	}

	return strings.Join(encoded, ",")
}

func b64encode(s string) string {
	return base64.StdEncoding.EncodeToString([]byte(s))
}

// NewUpload creates a new upload from an io.Reader.
func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
	stream, ok := reader.(io.ReadSeeker)

	if !ok {
		buf := new(bytes.Buffer)
		_, err := buf.ReadFrom(reader)
		if err != nil {
			return nil
		}
		stream = bytes.NewReader(buf.Bytes())
	}

	if metadata == nil {
		metadata = make(Metadata)
	}

	return &Upload{
		stream: stream,
		size:   size,

		Fingerprint: fingerprint,
		Metadata:    metadata,
	}
}
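For reference, `EncodedMetadata` produces the standard tus `Upload-Metadata` encoding: comma-joined `key base64(value)` pairs. A hypothetical upload of `file.txt` with mtime `1747152600` and filetype `text/plain` would send a header like the following (pair order varies because Go map iteration order is random):

```
Upload-Metadata: filename ZmlsZS50eHQ=,mtime MTc0NzE1MjYwMA==,filetype dGV4dC9wbGFpbg==
```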
backend/webdav/tus-uploader.go (new file, 191 lines)
@@ -0,0 +1,191 @@
package webdav

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// Uploader holds all information about a currently running upload
type Uploader struct {
	fs                  *Fs
	url                 string
	upload              *Upload
	offset              int64
	aborted             bool
	uploadSubs          []chan Upload
	notifyChan          chan bool
	overridePatchMethod bool
}

// NotifyUploadProgress subscribes to progress updates.
func (u *Uploader) NotifyUploadProgress(c chan Upload) {
	u.uploadSubs = append(u.uploadSubs, c)
}

func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
	if resp == nil {
		return true, err
	}

	switch resp.StatusCode {
	case 204:
		if off, err := strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64); err == nil {
			*newOff = off
			return false, nil
		}
		return false, err
	case 409:
		return false, ErrOffsetMismatch
	case 412:
		return false, ErrVersionMismatch
	case 413:
		return false, ErrLargeUpload
	}

	return f.shouldRetry(ctx, resp, err)
}

func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
	var method string

	if !u.overridePatchMethod {
		method = "PATCH"
	} else {
		method = "POST"
	}

	extraHeaders := map[string]string{} // FIXME: Use extraHeaders(ctx, src) from Object maybe?
	extraHeaders["Upload-Offset"] = strconv.FormatInt(offset, 10)
	extraHeaders["Tus-Resumable"] = "1.0.0"
	extraHeaders["filetype"] = u.upload.Metadata["filetype"]
	if u.overridePatchMethod {
		extraHeaders["X-HTTP-Method-Override"] = "PATCH"
	}

	url, err := url.Parse(u.url)
	if err != nil {
		return 0, fmt.Errorf("upload Chunk failed, could not parse url")
	}

	// FIXME: Use GetBody func as in chunking.go
	opts := rest.Opts{
		Method:        method,
		Path:          url.Path,
		NoResponse:    true,
		RootURL:       fmt.Sprintf("%s://%s", url.Scheme, url.Host),
		ContentLength: &size,
		Body:          body,
		ContentType:   "application/offset+octet-stream",
		ExtraHeaders:  extraHeaders,
		Options:       options,
	}

	var newOffset int64

	err = u.fs.pacer.CallNoRetry(func() (bool, error) {
		res, err := u.fs.srv.Call(ctx, &opts)
		return u.fs.shouldRetryChunk(ctx, res, err, &newOffset)
	})
	if err != nil {
		return 0, fmt.Errorf("uploadChunk failed: %w", err)
		// FIXME What do we do here? Remove the entire upload?
		// See https://github.com/tus/tusd/issues/176
	}

	return newOffset, nil
}

// Upload uploads the entire body to the server.
func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
	cnt := 1

	fs.Debug(u.fs, "Uploaded starts")
	for u.offset < u.upload.size && !u.aborted {
		err := u.UploadChunk(ctx, cnt, options...)
		cnt++
		if err != nil {
			return err
		}
	}
	fs.Debug(u.fs, "-- Uploaded finished")

	return nil
}

// UploadChunk uploads a single chunk.
func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenOption) error {
	chunkSize := u.fs.opt.ChunkSize
	data := make([]byte, chunkSize)

	_, err := u.upload.stream.Seek(u.offset, 0)

	if err != nil {
		fs.Errorf(u.fs, "Chunk %d: Error seek in stream failed: %v", cnt, err)
		return err
	}

	size, err := u.upload.stream.Read(data)

	if err != nil {
		fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data strem: %v", cnt, err)
		return err
	}

	body := bytes.NewBuffer(data[:size])

	newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)

	if err == nil {
		fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)
	} else {
		fs.Errorf(u.fs, "Uploaded chunk no %d failed: %v", cnt, err)

		return err
	}

	u.offset = newOffset

	u.upload.updateProgress(u.offset)

	u.notifyChan <- true

	return nil
}

// Waits for a signal to broadcast to all subscribers
func (u *Uploader) broadcastProgress() {
	for range u.notifyChan {
		for _, c := range u.uploadSubs {
			c <- *u.upload
		}
	}
}

// NewUploader creates a new Uploader.
func NewUploader(f *Fs, url string, upload *Upload, offset int64) *Uploader {
	notifyChan := make(chan bool)

	uploader := &Uploader{
		f,
		url,
		upload,
		offset,
		false,
		nil,
		notifyChan,
		false,
	}

	go uploader.broadcastProgress()

	return uploader
}
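A minimal sketch of consuming the progress API above, inside the same `webdav` package; the helper name is hypothetical, and `uploader` is an `*Uploader` obtained from `NewUploader` or `CreateUploader`:

```go
// watchProgress shows how the pieces fit together: subscribe first, then run
// the upload; broadcastProgress fans an Upload snapshot out to every
// subscriber after each chunk completes.
func watchProgress(ctx context.Context, uploader *Uploader) error {
	progressChan := make(chan Upload)
	uploader.NotifyUploadProgress(progressChan)
	go func() {
		for u := range progressChan {
			fmt.Printf("tus upload: %d%% (%d of %d bytes)\n", u.Progress(), u.Offset(), u.Size())
		}
	}()
	return uploader.Upload(ctx)
}
```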
backend/webdav/tus.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package webdav

/*
Chunked upload based on the tus protocol for ownCloud Infinite Scale
See https://tus.io/protocols/resumable-upload
*/

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"path/filepath"
	"strconv"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType string, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

	fn := filepath.Base(src.Remote())
	metadata := map[string]string{
		"filename": fn,
		"mtime":    strconv.FormatInt(src.ModTime(ctx).Unix(), 10),
		"filetype": contentType,
	}

	// Fingerprint is used to identify the upload when resuming. That is not yet implemented
	fingerprint := ""

	// create an upload from a file.
	upload := NewUpload(in, src.Size(), metadata, fingerprint)

	// create the uploader.
	uploader, err := o.CreateUploader(ctx, upload, options...)
	if err == nil {
		// start the uploading process.
		err = uploader.Upload(ctx, options...)
	}

	return err
}

func (f *Fs) getTusLocationOrRetry(ctx context.Context, resp *http.Response, err error) (bool, string, error) {

	switch resp.StatusCode {
	case 201:
		location := resp.Header.Get("Location")
		return false, location, nil
	case 412:
		return false, "", ErrVersionMismatch
	case 413:
		return false, "", ErrLargeUpload
	}

	retry, err := f.shouldRetry(ctx, resp, err)
	return retry, "", err
}

// CreateUploader creates a new upload to the server.
func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) {
	if u == nil {
		return nil, ErrNilUpload
	}

	// if c.Config.Resume && len(u.Fingerprint) == 0 {
	// 	return nil, ErrFingerprintNotSet
	// }

	l := int64(0)
	p := o.filePath()
	// cut the filename off
	dir, _ := filepath.Split(p)
	if dir == "" {
		dir = "/"
	}

	opts := rest.Opts{
		Method:        "POST",
		Path:          dir,
		NoResponse:    true,
		RootURL:       o.fs.endpointURL,
		ContentLength: &l,
		ExtraHeaders:  o.extraHeaders(ctx, o),
		Options:       options,
	}
	opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10)
	opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata()
	opts.ExtraHeaders["Tus-Resumable"] = "1.0.0"
	// opts.ExtraHeaders["mtime"] = strconv.FormatInt(src.ModTime(ctx).Unix(), 10)

	var tusLocation string
	// rclone http call
	err := o.fs.pacer.CallNoRetry(func() (bool, error) {
		var retry bool
		res, err := o.fs.srv.Call(ctx, &opts)
		retry, tusLocation, err = o.fs.getTusLocationOrRetry(ctx, res, err)
		return retry, err
	})
	if err != nil {
		return nil, fmt.Errorf("making upload directory failed: %w", err)
	}

	uploader := NewUploader(o.fs, tusLocation, u, 0)

	return uploader, nil
}
@@ -84,7 +84,10 @@ func init() {
 			Help: "Nextcloud",
 		}, {
 			Value: "owncloud",
-			Help:  "Owncloud",
+			Help:  "Owncloud 10 PHP based WebDAV server",
+		}, {
+			Value: "infinitescale",
+			Help:  "ownCloud Infinite Scale",
 		}, {
 			Value: "sharepoint",
 			Help:  "Sharepoint Online, authenticated by Microsoft account",
@@ -212,6 +215,7 @@ type Fs struct {
 	pacer              *fs.Pacer     // pacer for API calls
 	precision          time.Duration // mod time precision
 	canStream          bool          // set if can stream
+	canTus             bool          // supports the TUS upload protocol
 	useOCMtime         bool          // set if can use X-OC-Mtime
 	propsetMtime       bool          // set if can use propset
 	retryWithZeroDepth bool          // some vendors (sharepoint) won't list files when Depth is 1 (our default)
@@ -632,6 +636,15 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
 		f.propsetMtime = true
 		f.hasOCMD5 = true
 		f.hasOCSHA1 = true
+	case "infinitescale":
+		f.precision = time.Second
+		f.useOCMtime = true
+		f.propsetMtime = true
+		f.hasOCMD5 = false
+		f.hasOCSHA1 = true
+		f.canChunk = false
+		f.canTus = true
+		f.opt.ChunkSize = 10 * fs.Mebi
 	case "nextcloud":
 		f.precision = time.Second
 		f.useOCMtime = true
@@ -1329,7 +1342,7 @@ func (o *Object) Size() int64 {
 	ctx := context.TODO()
 	err := o.readMetaData(ctx)
 	if err != nil {
-		fs.Logf(o, "Failed to read metadata: %v", err)
+		fs.Infof(o, "Failed to read metadata: %v", err)
 		return 0
 	}
 	return o.size
@@ -1373,7 +1386,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 func (o *Object) ModTime(ctx context.Context) time.Time {
 	err := o.readMetaData(ctx)
 	if err != nil {
-		fs.Logf(o, "Failed to read metadata: %v", err)
+		fs.Infof(o, "Failed to read metadata: %v", err)
 		return time.Now()
 	}
 	return o.modTime
@@ -1499,9 +1512,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("Update mkParentDir failed: %w", err)
 	}

-	if o.shouldUseChunkedUpload(src) {
-		fs.Debugf(src, "Update will use the chunked upload strategy")
-		err = o.updateChunked(ctx, in, src, options...)
+	if o.fs.canTus { // supports the tus upload protocol, ie. InfiniteScale
+		fs.Debugf(src, "Update will use the tus protocol to upload")
+		contentType := fs.MimeType(ctx, src)
+		err = o.updateViaTus(ctx, in, contentType, src, options...)
+		if err != nil {
+			fs.Debug(src, "tus update failed.")
+			return fmt.Errorf("tus update failed: %w", err)
+		}
+	} else if o.shouldUseChunkedUpload(src) {
+		if o.fs.opt.Vendor == "nextcloud" {
+			fs.Debugf(src, "Update will use the chunked upload strategy")
+			err = o.updateChunked(ctx, in, src, options...)
+		} else {
+			fs.Debug(src, "Chunking - unknown vendor")
+		}
 		if err != nil {
 			return err
 		}
@@ -1513,10 +1538,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		// TODO: define getBody() to enable low-level HTTP/2 retries
 		err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
 		if err != nil {
-			return err
+			return fmt.Errorf("unchunked simple update failed: %w", err)
 		}
 	}

 	// read metadata from remote
 	o.hasMetaData = false
 	return o.readMetaData(ctx)
@@ -1526,7 +1550,7 @@ func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string
 	extraHeaders := map[string]string{}
 	if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
 		if o.fs.useOCMtime {
-			extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
+			extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", o.modTime.Unix())
 		}
 		// Set one upload checksum
 		// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
@@ -3,20 +3,22 @@ package bilib

 import (
 	"bytes"
-	"log"
+	"log/slog"

-	"github.com/sirupsen/logrus"
+	"github.com/rclone/rclone/fs/log"
 )

-// CaptureOutput runs a function capturing its output.
+// CaptureOutput runs a function capturing its output at log level INFO.
 func CaptureOutput(fun func()) []byte {
-	logSave := log.Writer()
-	logrusSave := logrus.StandardLogger().Out
 	buf := &bytes.Buffer{}
-	log.SetOutput(buf)
-	logrus.SetOutput(buf)
+	oldLevel := log.Handler.SetLevel(slog.LevelInfo)
+	log.Handler.SetOutput(func(level slog.Level, text string) {
+		buf.WriteString(text)
+	})
+	defer func() {
+		log.Handler.ResetOutput()
+		log.Handler.SetLevel(oldLevel)
+	}()
 	fun()
-	log.SetOutput(logSave)
-	logrus.SetOutput(logrusSave)
 	return buf.Bytes()
 }
@@ -6,6 +6,8 @@ package ncdu
 import (
 	"context"
 	"fmt"
+	"log/slog"
+	"os"
 	"path"
 	"reflect"
 	"sort"
@@ -925,23 +927,19 @@ func (u *UI) Run() error {
 		return fmt.Errorf("screen init: %w", err)
 	}

-	// Hijack fs.LogOutput so that it doesn't corrupt the screen.
-	if logOutput := fs.LogOutput; !log.Redirected() {
-		type log struct {
-			text  string
-			level fs.LogLevel
-		}
-		var logs []log
-		fs.LogOutput = func(level fs.LogLevel, text string) {
+	// Hijack log output so that it doesn't corrupt the screen.
+	if !log.Redirected() {
+		var logs []string
+		log.Handler.SetOutput(func(level slog.Level, text string) {
 			if len(logs) > 100 {
 				logs = logs[len(logs)-100:]
 			}
-			logs = append(logs, log{level: level, text: text})
-		}
+			logs = append(logs, text)
+		})
 		defer func() {
-			fs.LogOutput = logOutput
-			for i := range logs {
-				logOutput(logs[i].level, logs[i].text)
+			log.Handler.ResetOutput()
+			for _, text := range logs {
+				_, _ = os.Stderr.WriteString(text)
 			}
 		}()
 	}
@@ -5,11 +5,11 @@ package cmd
 import (
 	"bytes"
 	"fmt"
+	"log/slog"
 	"strings"
 	"sync"
 	"time"

 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/operations"
@@ -19,8 +19,6 @@ import (
 const (
 	// interval between progress prints
 	defaultProgressInterval = 500 * time.Millisecond
-	// time format for logging
-	logTimeFormat = "2006/01/02 15:04:05"
 )

 // startProgress starts the progress bar printing
@@ -28,15 +26,13 @@ const (
 // It returns a func which should be called to stop the stats.
 func startProgress() func() {
 	stopStats := make(chan struct{})
-	oldLogOutput := fs.LogOutput
 	oldSyncPrint := operations.SyncPrintf

 	if !log.Redirected() {
 		// Intercept the log calls if not logging to file or syslog
-		fs.LogOutput = func(level fs.LogLevel, text string) {
-			printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
-
-		}
+		log.Handler.SetOutput(func(level slog.Level, text string) {
+			printProgress(text)
+		})
 	}

 	// Intercept output from functions such as HashLister to stdout
@@ -60,7 +56,10 @@ func startProgress() func() {
 			case <-stopStats:
 				ticker.Stop()
 				printProgress("")
-				fs.LogOutput = oldLogOutput
+				if !log.Redirected() {
+					// Reset intercept of the log calls
+					log.Handler.ResetOutput()
+				}
 				operations.SyncPrintf = oldSyncPrint
 				fmt.Println("")
 				return
@@ -28,7 +28,8 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr

 	if entry.IsDir() {
 		if addPrefix {
-			response.AddPrefix(objectPath)
+			prefixWithTrailingSlash := objectPath + "/"
+			response.AddPrefix(prefixWithTrailingSlash)
 			continue
 		}
 		err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
@@ -14,7 +14,7 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
 access.

 Please note that some clients may require HTTPS endpoints. See [the
-SSL docs](#ssl-tls) for more information.
+SSL docs](#tls-ssl) for more information.

 This command uses the [VFS directory cache](#vfs-virtual-file-system).
 All the functionality will work with `--vfs-cache-mode off`. Using
@@ -81,7 +81,6 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
 		gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
 	)

-	w.handler = http.NewServeMux()
 	w.handler = w.faker.Server()

 	if proxy.Opt.AuthProxy != "" {
@@ -48,7 +48,7 @@ func TestEnvironmentVariables(t *testing.T) {
 	env = "RCLONE_LOG_LEVEL=DEBUG"
 	out, err = rcloneEnv(env, "version", "--quiet")
 	if assert.Error(t, err) {
-		assert.Contains(t, out, " DEBUG : ")
+		assert.Contains(t, out, " DEBUG ")
 		assert.Contains(t, out, "Can't set -q and --log-level")
 		assert.Contains(t, "exit status 1", err.Error())
 	}
@@ -329,7 +329,7 @@ func TestEnvironmentVariables(t *testing.T) {
 	jsonLogOK := func() {
 		t.Helper()
 		if assert.NoError(t, err) {
-			assert.Contains(t, out, `{"level":"debug",`)
+			assert.Contains(t, out, `"level":"debug"`)
 			assert.Contains(t, out, `"msg":"Version `)
 			assert.Contains(t, out, `"}`)
 		}
@@ -961,3 +961,15 @@ put them back in again.` >}}
 * Markus Gerstel <markus.gerstel@osirium.com>
 * simwai <16225108+simwai@users.noreply.github.com>
 * Ben Alex <ben.alex@acegi.com.au>
+* Klaas Freitag <opensource@freisturz.de> <klaas.freitag@kiteworks.com>
+* Andrew Kreimer <algonell@gmail.com>
+* Ed Craig-Wood <138211970+edc-w@users.noreply.github.com>
+* Christian Richter <crichter@owncloud.com> <1058116+dragonchaser@users.noreply.github.com>
+* Ralf Haferkamp <r.haferkamp@opencloud.eu>
+* Jugal Kishore <me@devjugal.com>
+* Tho Neyugn <nguyentruongtho@users.noreply.github.com>
+* Ben Boeckel <mathstuf@users.noreply.github.com>
+* Clément Wehrung <cwehrung@nurves.com>
+* Jeff Geerling <geerlingguy@mac.com>
+* Germán Casares <german.casares.march+github@gmail.com>
+* fhuber <florian.huber@noris.de>
@@ -5,6 +5,58 @@ description: "Rclone Changelog"

 # Changelog

+## v1.69.3 - 2025-05-21
+
+[See commits](https://github.com/rclone/rclone/compare/v1.69.2...v1.69.3)
+
+* Bug Fixes
+    * build: Reapply update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
+    * build: Update github.com/ebitengine/purego to work around bug in go1.24.3 (Nick Craig-Wood)
+
+## v1.69.2 - 2025-05-01
+
+[See commits](https://github.com/rclone/rclone/compare/v1.69.1...v1.69.2)
+
+* Bug fixes
+    * accounting: Fix percentDiff calculation (Anagh Kumar Baranwal)
+    * build
+        * Update github.com/golang-jwt/jwt/v4 from 4.5.1 to 4.5.2 to fix CVE-2025-30204 (dependabot[bot])
+        * Update github.com/golang-jwt/jwt/v5 from 5.2.1 to 5.2.2 to fix CVE-2025-30204 (dependabot[bot])
+        * Update golang.org/x/crypto to v0.35.0 to fix CVE-2025-22869 (Nick Craig-Wood)
+        * Update golang.org/x/net from 0.36.0 to 0.38.0 to fix CVE-2025-22870 (dependabot[bot])
+        * Update golang.org/x/net to 0.36.0 to fix CVE-2025-22869 (dependabot[bot])
+        * Stop building with go < go1.23 as security updates forbade it (Nick Craig-Wood)
+        * Fix docker plugin build (Anagh Kumar Baranwal)
+    * cmd: Fix crash if rclone is invoked without any arguments (Janne Hellsten)
+    * config: Read configuration passwords from stdin even when terminated with EOF (Samantha Bowen)
+    * doc fixes (Andrew Kreimer, Danny Garside, eccoisle, Ed Craig-Wood, emyarod, jack, Jugal Kishore, Markus Gerstel, Michael Kebe, Nick Craig-Wood, simonmcnair, simwai, Zachary Vorhies)
+    * fs: Fix corruption of SizeSuffix with "B" suffix in config (eg --min-size) (Nick Craig-Wood)
+    * lib/http: Fix race between Serve() and Shutdown() (Nick Craig-Wood)
+    * object: Fix memory object out of bounds Seek (Nick Craig-Wood)
+    * operations: Fix call fmt.Errorf with wrong err (alingse)
+    * rc
+        * Disable the metrics server when running `rclone rc` (hiddenmarten)
+        * Fix debug/* commands not being available over unix sockets (Nick Craig-Wood)
+    * serve nfs: Fix unlikely crash (Nick Craig-Wood)
+    * stats: Fix the speed not getting updated after a pause in the processing (Anagh Kumar Baranwal)
+    * sync
+        * Fix cpu spinning when empty directory finding with leading slashes (Nick Craig-Wood)
+        * Copy dir modtimes even when copyEmptySrcDirs is false (ll3006)
+    * VFS
+        * Fix directory cache serving stale data (Lorenz Brun)
+        * Fix inefficient directory caching when directory reads are slow (huanghaojun)
+        * Fix integration test failures (Nick Craig-Wood)
+* Drive
+    * Metadata: fix error when setting copy-requires-writer-permission on a folder (Nick Craig-Wood)
+* Dropbox
+    * Retry link without expiry (Dave Vasilevsky)
+* HTTP
+    * Correct root if definitely pointing to a file (nielash)
+* Iclouddrive
+    * Fix so created files are writable (Ben Alex)
+* Onedrive
+    * Fix metadata ordering in permissions (Nick Craig-Wood)
+
 ## v1.69.1 - 2025-02-14

 [See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.69.1)
@@ -60,7 +112,7 @@ description: "Rclone Changelog"
 * fs: Make `--links` flag global and add new `--local-links` and `--vfs-links` flags (Nick Craig-Wood)
 * http servers: Disable automatic authentication skipping for unix sockets in http servers (Moises Lima)
     * This was making it impossible to use unix sockets with a proxy
-    * This might now cause rclone to need authenticaton where it didn't before
+    * This might now cause rclone to need authentication where it didn't before
 * oauthutil: add support for OAuth client credential flow (Martin Hassack, Nick Craig-Wood)
 * operations: make log messages consistent for mkdir/rmdir at INFO level (Nick Craig-Wood)
 * rc: Add `relative` to [vfs/queue-set-expiry](/rc/#vfs-queue-set-expiry) (Nick Craig-Wood)
@@ -738,7 +790,7 @@ instead of `--size-only`, when `check` is not available.
 * Update all dependencies (Nick Craig-Wood)
 * Refactor version info and icon resource handling on windows (albertony)
 * doc updates (albertony, alfish2000, asdffdsazqqq, Dimitri Papadopoulos, Herby Gillot, Joda Stößer, Manoj Ghosh, Nick Craig-Wood)
-* Implement `--metadata-mapper` to transform metatadata with a user supplied program (Nick Craig-Wood)
+* Implement `--metadata-mapper` to transform metadata with a user supplied program (Nick Craig-Wood)
 * Add `ChunkWriterDoesntSeek` feature flag and set it for b2 (Nick Craig-Wood)
 * lib/http: Export basic go string functions for use in `--template` (Gabriel Espinoza)
 * makefile: Use POSIX compatible install arguments (Mina Galić)
@@ -853,7 +905,7 @@ instead of `--size-only`, when `check` is not available.
 * Fix "fatal error: concurrent map writes" (Nick Craig-Wood)
 * B2
     * Fix multipart upload: corrupted on transfer: sizes differ XXX vs 0 (Nick Craig-Wood)
-    * Fix locking window when getting mutipart upload URL (Nick Craig-Wood)
+    * Fix locking window when getting multipart upload URL (Nick Craig-Wood)
     * Fix server side copies greater than 4GB (Nick Craig-Wood)
     * Fix chunked streaming uploads (Nick Craig-Wood)
    * Reduce default `--b2-upload-concurrency` to 4 to reduce memory usage (Nick Craig-Wood)
@@ -968,8 +968,9 @@ on any OS, and the value is defined as following:
 - On Unix: `$HOME` if defined, else by looking up current user in OS-specific user database
   (e.g. passwd file), or else use the result from shell command `cd && pwd`.

-If you run `rclone config file` you will see where the default
-location is for you.
+If you run `rclone config file` you will see where the default location is for
+you. Running `rclone config touch` will ensure a configuration file exists,
+creating an empty one in the default location if there is none.

 The fact that an existing file `rclone.conf` in the same directory
 as the rclone executable is always preferred, means that it is easy
@@ -980,7 +981,13 @@ same directory.
 If the location is set to empty string `""` or path to a file
 with name `notfound`, or the os null device represented by value `NUL` on
 Windows and `/dev/null` on Unix systems, then rclone will keep the
-config file in memory only.
+configuration file in memory only.
+
+You may see a log message "Config file not found - using defaults" if there is
+no configuration file. This can be suppressed, e.g. if you are using rclone
+entirely with [on the fly remotes](/docs/#backend-path-to-dir), by using a
+memory-only configuration file or by creating an empty configuration file, as
+described above.

 The file format is basic [INI](https://en.wikipedia.org/wiki/INI_file#Format):
 Sections of text, led by a `[section]` header and followed by
@@ -1476,12 +1483,21 @@ have a signal to rotate logs.

 ### --log-format LIST ###

-Comma separated list of log format options. Accepted options are `date`,
-`time`, `microseconds`, `pid`, `longfile`, `shortfile`, `UTC`. Any other
-keywords will be silently ignored. `pid` will tag log messages with process
-identifier which useful with `rclone mount --daemon`. Other accepted
-options are explained in the [go documentation](https://pkg.go.dev/log#pkg-constants).
-The default log format is "`date`,`time`".
+Comma separated list of log format options. The accepted options are:
+
+- `date` - Add a date in the format YYYY/MM/DD to the log.
+- `time` - Add a time to the log in format HH:MM:SS.
+- `microseconds` - Add microseconds to the time in format HH:MM:SS.SSSSSS.
+- `UTC` - Make the logs in UTC not localtime.
+- `longfile` - Adds the source file and line number of the log statement.
+- `shortfile` - Adds the source file and line number of the log statement.
+- `pid` - Add the process ID to the log - useful with `rclone mount --daemon`.
+- `nolevel` - Don't add the level to the log.
+- `json` - Equivalent to adding `--use-json-log`
+
+They are added to the log line in the order above.
+
+The default log format is `"date,time"`.
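
For example, to add microseconds and the source location of each log statement to the default format (the paths are placeholders):

```
rclone copy /local/data remote:data --log-format date,time,microseconds,shortfile
```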

 ### --log-level LEVEL ###

@@ -1499,10 +1515,90 @@ warnings and significant events.

 `ERROR` is equivalent to `-q`. It only outputs error messages.

+### --windows-event-log LEVEL ###
+
+If this is configured (the default is `OFF`) then logs of this level
+and above will be logged to the Windows event log in **addition** to
+the normal logs. These will be logged in JSON format as described
+below regardless of what format the main logs are configured for.
+
+The Windows event log only has 3 levels of severity `Info`, `Warning`
+and `Error`. If enabled we map rclone levels like this.
+
+- `Error` ← `ERROR` (and above)
+- `Warning` ← `WARNING` (note that this level is defined but not currently used).
+- `Info` ← `NOTICE`, `INFO` and `DEBUG`.
+
+Rclone will declare its log source as "rclone" if it has enough
+permissions to create the registry key needed. If not then logs will
+appear as "Application". You can run `rclone version --windows-event-log DEBUG`
+once as administrator to create the registry key in advance.
+
+**Note** that the `--windows-event-log` level must be greater (more
+severe) than or equal to the `--log-level`. For example to log DEBUG
+to a log file but ERRORs to the event log you would use
+
+    --log-file rclone.log --log-level DEBUG --windows-event-log ERROR
+
+This option is only supported on Windows platforms.
+
 ### --use-json-log ###

-This switches the log format to JSON for rclone. The fields of json log
-are level, msg, source, time.
+This switches the log format to JSON for rclone. The fields of JSON
+log are `level`, `msg`, `source`, `time`. The JSON logs will be
+printed on a single line, but are shown expanded here for clarity.
+
+```json
+{
+  "time": "2025-05-13T17:30:51.036237518+01:00",
+  "level": "debug",
+  "msg": "4 go routines active\n",
+  "source": "cmd/cmd.go:298"
+}
+```
+
+Completed data transfer logs will have extra `size` information. Logs
+which are about a particular object will have `object` and
+`objectType` fields also.
+
+```json
+{
+  "time": "2025-05-13T17:38:05.540846352+01:00",
+  "level": "info",
+  "msg": "Copied (new) to: file2.txt",
+  "size": 6,
+  "object": "file.txt",
+  "objectType": "*local.Object",
+  "source": "operations/copy.go:368"
+}
+```
+
+Stats logs will contain a `stats` field which is the same as
+returned from the rc call [core/stats](/rc/#core-stats).
+
+```json
+{
+  "time": "2025-05-13T17:38:05.540912847+01:00",
+  "level": "info",
+  "msg": "...text version of the stats...",
+  "stats": {
+    "bytes": 6,
+    "checks": 0,
+    "deletedDirs": 0,
+    "deletes": 0,
+    "elapsedTime": 0.000904825,
+    ...truncated for clarity...
+    "totalBytes": 6,
+    "totalChecks": 0,
+    "totalTransfers": 1,
+    "transferTime": 0.000882794,
+    "transfers": 1
+  },
+  "source": "accounting/stats.go:569"
+}
+```

 ### --low-level-retries NUMBER ###
@@ -1557,6 +1653,32 @@ Setting `--max-buffer-memory` allows the buffer memory to be
 controlled so that it doesn't overwhelm the machine and allows
 `--transfers` to be set large.

+### --max-connections=N ###
+
+This sets the maximum number of concurrent calls to the backend API.
+It may not map 1:1 to TCP or HTTP connections depending on the backend
+in use and the use of HTTP1 vs HTTP2.
+
+When downloading files, backends only limit the initial opening of the
+stream. The bulk data download is not counted as a connection. This
+means that the `--max-connections` flag won't limit the total number
+of downloads.
+
+Note that it is possible to cause deadlocks with this setting so it
+should be used with care.
+
+If you are doing a sync or copy then make sure `--max-connections` is
+one more than the sum of `--transfers` and `--checkers`.
+
+If you use `--check-first` then `--max-connections` just needs to be
+one more than the maximum of `--checkers` and `--transfers`.
+
+So for `--max-connections 3` you'd use `--checkers 2 --transfers 2
+--check-first` or `--checkers 1 --transfers 1`.
+
+Setting this flag can be useful for backends which do multipart
+uploads to limit the number of simultaneous parts being transferred.
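
As a worked example of the sizing rule above: with `--transfers 4 --checkers 8` the sum is 12, so `--max-connections 13` is the safe minimum (the paths are placeholders):

```
rclone sync /local/data remote:data --transfers 4 --checkers 8 --max-connections 13
```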

 ### --max-delete=N ###

 This tells rclone not to delete more than N files. If that limit is
@@ -22,6 +22,20 @@ See the [remote setup docs](/remote_setup/) for more info.

 This has now been documented in its own [remote setup page](/remote_setup/).

+### How can I get rid of the "Config file not found" notice?
+
+If you see a notice like 'NOTICE: Config file "rclone.conf" not found', this
+means you have not configured any remotes.
+
+If you need to configure a remote, see the [config help docs](/docs/#configure).
+
+If you are using rclone entirely with [on the fly remotes](/docs/#backend-path-to-dir),
+you can create an empty config file to get rid of this notice, for example:
+
+```
+rclone config touch
+```
+
 ### Can rclone sync directly from drive to s3 ###

 Rclone can sync between two remote cloud storage systems just fine.
@@ -14,6 +14,11 @@ Google Photos.
 limitations, so please read the [limitations section](#limitations)
 carefully to make sure it is suitable for your use.

+**NB** From March 31, 2025 rclone can only download photos it
+uploaded. This limitation is due to policy changes at Google. You may
+need to run `rclone config reconnect remote:` to make rclone work
+again after upgrading to rclone v1.70.
+
 ## Configuration

 The initial setup for google cloud storage involves getting a token from Google Photos
@@ -528,6 +533,11 @@ videos or images or formats that Google Photos doesn't understand,
 rclone will upload the file, then Google Photos will give an error
 when it is turned into a media item.

+**NB** From March 31, 2025 rclone can only download photos it
+uploaded. This limitation is due to policy changes at Google. You may
+need to run `rclone config reconnect remote:` to make rclone work
+again after upgrading to rclone v1.70.
+
 Note that all media items uploaded to Google Photos through the API
 are stored in full resolution at "original quality" and **will** count
 towards your storage quota in your Google Account. The API does
@@ -5551,7 +5551,7 @@ source).

 This has the following consequences:

-- Using `rclone rcat` will fail as the medatada doesn't match after upload
+- Using `rclone rcat` will fail as the metadata doesn't match after upload
 - Uploading files with `rclone mount` will fail for the same reason
     - This can be worked around by using `--vfs-cache-mode writes` or `--vfs-cache-mode full` or setting `--s3-upload-cutoff` large
 - Files uploaded via a multipart upload won't have their modtimes
@@ -104,11 +104,11 @@ To copy a local directory to an WebDAV directory called backup

 ### Modification times and hashes

 Plain WebDAV does not support modified times. However when used with
-Fastmail Files, Owncloud or Nextcloud rclone will support modified times.
+Fastmail Files, ownCloud or Nextcloud rclone will support modified times.

 Likewise plain WebDAV does not support hashes, however when used with
-Fastmail Files, Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes.
-Depending on the exact version of Owncloud or Nextcloud hashes may
+Fastmail Files, ownCloud or Nextcloud rclone will support SHA1 and MD5 hashes.
+Depending on the exact version of ownCloud or Nextcloud hashes may
 appear on all objects, or only on objects which had a hash uploaded
 with them.
@@ -355,19 +355,28 @@ this as the password.
|
||||
|
||||
Fastmail supports modified times using the `X-OC-Mtime` header.
|
||||
|
||||
### Owncloud
|
||||
### ownCloud
|
||||
|
||||
Click on the settings cog in the bottom right of the page and this
|
||||
will show the WebDAV URL that rclone needs in the config step. It
|
||||
will look something like `https://example.com/remote.php/webdav/`.
|
||||
|
||||
Owncloud supports modified times using the `X-OC-Mtime` header.
|
||||
ownCloud supports modified times using the `X-OC-Mtime` header.
|
||||
|
||||
### Nextcloud
|
||||
|
||||
This is configured in an identical way to Owncloud. Note that
|
||||
This is configured in an identical way to ownCloud. Note that
|
||||
Nextcloud initially did not support streaming of files (`rcat`) whereas
|
||||
Owncloud did, but [this](https://github.com/nextcloud/nextcloud-snap/issues/365) seems to be fixed as of 2020-11-27 (tested with rclone v1.53.1 and Nextcloud Server v19).
|
||||
ownCloud did, but [this](https://github.com/nextcloud/nextcloud-snap/issues/365) seems to be fixed as of 2020-11-27 (tested with rclone v1.53.1 and Nextcloud Server v19).
|
||||
|
||||
### ownCloud Infinite Scale
|
||||
|
||||
The WebDAV URL for Infinite Scale can be found in the details panel of
|
||||
any space in Infinite Scale, if the display was enabled in the personal
|
||||
settings of the user through a checkbox there.
|
||||
|
||||
Infinite Scale works with the chunking [tus](https://tus.io) upload protocol.
|
||||
The chunk size is currently fixed 10 MB.
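An illustrative remote definition for Infinite Scale (the URL and space ID are placeholders, loosely following the test server setup added elsewhere in this change):

```
[ocis]
type = webdav
url = https://example.com/dav/spaces/<space-id>
vendor = infinitescale
user = admin
pass = <obscured password>
```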

### Sharepoint Online

46
fs/config.go
46
fs/config.go
@@ -545,31 +545,11 @@ var ConfigOptionsInfo = Options{{
    Help:   "Add partial-suffix to temporary file name when --inplace is not used",
    Groups: "Copy",
}, {
    Name: "max_connections",
    Help: strings.ReplaceAll(`Maximum number of simultaneous connections, 0 for unlimited.

This sets the maximum number of connections made to the backend on a
per backend basis. Connections in this case are calls to the backend
API and may not map 1:1 to TCP or HTTP connections depending on the
backend in use.

Note that it is possible to cause deadlocks with this setting so it
should be used with care.

If you are doing a sync or copy then make sure |--max-connections| is
one more than the sum of |--transfers| and |--checkers|.

If you use |--check-first| then |--max-connections| just needs to be
one more than the maximum of |--checkers| and |--transfers|.

So for |--max-connections 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.

Setting this flag can be useful for backends which do multipart
uploads or downloads to limit the number of total connections.
`, "|", "`"),
    Name:     "max_connections",
    Help:     "Maximum number of simultaneous backend API connections, 0 for unlimited.",
    Default:  0,
    Advanced: true,
    Groups:   "Networking",
}}

// ConfigInfo is filesystem config options
@@ -691,9 +671,13 @@ func init() {
    RegisterGlobalOptions(OptionsInfo{Name: "main", Opt: globalConfig, Options: ConfigOptionsInfo, Reload: globalConfig.Reload})

    // initial guess at log level from the flags
    globalConfig.LogLevel = initialLogLevel()
    globalConfig.LogLevel = InitialLogLevel()
}

// LogReload is written by fs/log to set variables which should really
// be there but we can't move due to them being visible here in the rc.
var LogReload = func(*ConfigInfo) error { return nil }

// Reload assumes the config has been edited and does what is necessary to make it live
func (ci *ConfigInfo) Reload(ctx context.Context) error {
    // Set -vv if --dump is in use
@@ -707,11 +691,6 @@ func (ci *ConfigInfo) Reload(ctx context.Context) error {
        ci.StatsLogLevel = LogLevelNotice
    }

    // If --use-json-log then start the JSON logger
    if ci.UseJSONLog {
        InstallJSONLogger(ci.LogLevel)
    }

    // Check --compare-dest and --copy-dest
    if len(ci.CompareDest) > 0 && len(ci.CopyDest) > 0 {
        return fmt.Errorf("can't use --compare-dest with --copy-dest")
@@ -751,13 +730,12 @@ func (ci *ConfigInfo) Reload(ctx context.Context) error {
    nonZero(&ci.Transfers)
    nonZero(&ci.Checkers)

    return nil
    return LogReload(ci)
}

// Initial logging level
//
// Perform a simple check for debug flags to enable debug logging during the flag initialization
func initialLogLevel() LogLevel {
// InitialLogLevel performs a simple check for debug flags to enable
// debug logging during the flag initialization.
func InitialLogLevel() LogLevel {
    logLevel := LogLevelNotice
    for argIndex, arg := range os.Args {
        if strings.HasPrefix(arg, "-vv") && strings.TrimRight(arg, "v") == "-" {

126
fs/log.go
126
fs/log.go
@@ -4,10 +4,9 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "log"
    "log/slog"
    "os"

    "github.com/sirupsen/logrus"
    "slices"
)

// LogLevel describes rclone's logs. These are a subset of the syslog log levels.
@@ -33,6 +32,7 @@ const (
    LogLevelNotice // Normal logging, -q suppresses
    LogLevelInfo   // Transfers, needs -v
    LogLevelDebug  // Debug level, needs -vv
    LogLevelOff
)

type logLevelChoices struct{}
@@ -47,6 +47,7 @@ func (logLevelChoices) Choices() []string {
        LogLevelNotice: "NOTICE",
        LogLevelInfo:   "INFO",
        LogLevelDebug:  "DEBUG",
        LogLevelOff:    "OFF",
    }
}
@@ -54,19 +55,33 @@ func (logLevelChoices) Type() string {
    return "LogLevel"
}

// LogPrintPid enables process pid in log
var LogPrintPid = false
// slogLevel definitions as slog.Level constants.
// The integer values determine severity for filtering.
// Lower values are less severe (e.g., Debug), higher values are more severe (e.g., Emergency).
// We fit our extra values into slog's scale.
const (
    // slog.LevelDebug slog.Level = -4
    // slog.LevelInfo  slog.Level = 0
    SlogLevelNotice = slog.Level(2) // Between Info (0) and Warn (4)
    // slog.LevelWarn  slog.Level = 4
    // slog.LevelError slog.Level = 8
    SlogLevelCritical  = slog.Level(12) // More severe than Error
    SlogLevelAlert     = slog.Level(16) // More severe than Critical
    SlogLevelEmergency = slog.Level(20) // Most severe
    SlogLevelOff       = slog.Level(24) // A very high value
)
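A minimal sketch (not part of the change itself) of why the numeric spacing matters: severity filtering is a plain integer comparison, so the custom levels slot between slog's built-ins.

```go
package main

import (
    "fmt"
    "log/slog"
)

func main() {
    // Mirrors SlogLevelNotice above: sits between Info (0) and Warn (4).
    notice := slog.Level(2)
    // A handler whose minimum level is Info lets NOTICE through...
    fmt.Println(notice >= slog.LevelInfo) // true
    // ...but raising the minimum to Warn suppresses it.
    fmt.Println(notice >= slog.LevelWarn) // false
}
```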

// InstallJSONLogger is a hook that --use-json-log calls
var InstallJSONLogger = func(logLevel LogLevel) {}

// LogOutput sends the text to the logger of level
var LogOutput = func(level LogLevel, text string) {
    text = fmt.Sprintf("%-6s: %s", level, text)
    if LogPrintPid {
        text = fmt.Sprintf("[%d] %s", os.Getpid(), text)
    }
    _ = log.Output(4, text)
// Map our level numbers to slog level numbers
var levelToSlog = []slog.Level{
    LogLevelEmergency: SlogLevelEmergency,
    LogLevelAlert:     SlogLevelAlert,
    LogLevelCritical:  SlogLevelCritical,
    LogLevelError:     slog.LevelError,
    LogLevelWarning:   slog.LevelWarn,
    LogLevelNotice:    SlogLevelNotice,
    LogLevelInfo:      slog.LevelInfo,
    LogLevelDebug:     slog.LevelDebug,
    LogLevelOff:       SlogLevelOff,
}

// LogValueItem describes keyed item for a JSON log entry
@@ -108,76 +123,45 @@ func (j LogValueItem) String() string {
    return fmt.Sprint(j.value)
}

func logLogrus(level LogLevel, text string, fields logrus.Fields) {
    switch level {
    case LogLevelDebug:
        logrus.WithFields(fields).Debug(text)
    case LogLevelInfo:
        logrus.WithFields(fields).Info(text)
    case LogLevelNotice, LogLevelWarning:
        logrus.WithFields(fields).Warn(text)
    case LogLevelError:
        logrus.WithFields(fields).Error(text)
    case LogLevelCritical:
        logrus.WithFields(fields).Fatal(text)
    case LogLevelEmergency, LogLevelAlert:
        logrus.WithFields(fields).Panic(text)
// LogLevelToSlog converts an rclone log level to log/slog log level.
func LogLevelToSlog(level LogLevel) slog.Level {
    slogLevel := slog.LevelError
    // NB level is unsigned so we don't check < 0 here
    if int(level) < len(levelToSlog) {
        slogLevel = levelToSlog[level]
    }
    return slogLevel
}

func logLogrusWithObject(level LogLevel, o any, text string, fields logrus.Fields) {
func logSlog(level LogLevel, text string, attrs []any) {
    slog.Log(context.Background(), LogLevelToSlog(level), text, attrs...)
}

func logSlogWithObject(level LogLevel, o any, text string, attrs []any) {
    if o != nil {
        if fields == nil {
            fields = logrus.Fields{}
        }
        fields["object"] = fmt.Sprintf("%+v", o)
        fields["objectType"] = fmt.Sprintf("%T", o)
        attrs = slices.Concat(attrs, []any{
            "object", fmt.Sprintf("%+v", o),
            "objectType", fmt.Sprintf("%T", o),
        })
    }
    logLogrus(level, text, fields)
}

func logJSON(level LogLevel, o any, text string) {
    logLogrusWithObject(level, o, text, nil)
}

func logJSONf(level LogLevel, o any, text string, args ...any) {
    text = fmt.Sprintf(text, args...)
    fields := logrus.Fields{}
    for _, arg := range args {
        if item, ok := arg.(LogValueItem); ok {
            fields[item.key] = item.value
        }
    }
    logLogrusWithObject(level, o, text, fields)
}

func logPlain(level LogLevel, o any, text string) {
    if o != nil {
        text = fmt.Sprintf("%v: %s", o, text)
    }
    LogOutput(level, text)
}

func logPlainf(level LogLevel, o any, text string, args ...any) {
    logPlain(level, o, fmt.Sprintf(text, args...))
    logSlog(level, text, attrs)
}

// LogPrint produces a log string from the arguments passed in
func LogPrint(level LogLevel, o any, text string) {
    if GetConfig(context.TODO()).UseJSONLog {
        logJSON(level, o, text)
    } else {
        logPlain(level, o, text)
    }
    logSlogWithObject(level, o, text, nil)
}

// LogPrintf produces a log string from the arguments passed in
func LogPrintf(level LogLevel, o any, text string, args ...any) {
    if GetConfig(context.TODO()).UseJSONLog {
        logJSONf(level, o, text, args...)
    } else {
        logPlainf(level, o, text, args...)
    text = fmt.Sprintf(text, args...)
    var fields []any
    for _, arg := range args {
        if item, ok := arg.(LogValueItem); ok {
            fields = append(fields, item.key, item.value)
        }
    }
    logSlogWithObject(level, o, text, fields)
}

// LogLevelPrint writes logs at the given level

@@ -1,106 +0,0 @@
package log

import (
    "fmt"
    "runtime"
    "strings"

    "github.com/rclone/rclone/fs"
    "github.com/sirupsen/logrus"
)

var loggerInstalled = false

// InstallJSONLogger installs the JSON logger at the specified log level
func InstallJSONLogger(logLevel fs.LogLevel) {
    if !loggerInstalled {
        logrus.AddHook(NewCallerHook())
        loggerInstalled = true
    }
    logrus.SetFormatter(&logrus.JSONFormatter{
        TimestampFormat: "2006-01-02T15:04:05.999999-07:00",
    })
    logrus.SetLevel(logrus.DebugLevel)
    switch logLevel {
    case fs.LogLevelEmergency, fs.LogLevelAlert:
        logrus.SetLevel(logrus.PanicLevel)
    case fs.LogLevelCritical:
        logrus.SetLevel(logrus.FatalLevel)
    case fs.LogLevelError:
        logrus.SetLevel(logrus.ErrorLevel)
    case fs.LogLevelWarning, fs.LogLevelNotice:
        logrus.SetLevel(logrus.WarnLevel)
    case fs.LogLevelInfo:
        logrus.SetLevel(logrus.InfoLevel)
    case fs.LogLevelDebug:
        logrus.SetLevel(logrus.DebugLevel)
    }
}

// install hook in fs to call to avoid circular dependency
func init() {
    fs.InstallJSONLogger = InstallJSONLogger
}

// CallerHook for log the calling file and line of the fine
type CallerHook struct {
    Field  string
    Skip   int
    levels []logrus.Level
}

// NewCallerHook use to make a hook
func NewCallerHook(levels ...logrus.Level) logrus.Hook {
    hook := CallerHook{
        Field:  "source",
        Skip:   7,
        levels: levels,
    }
    if len(hook.levels) == 0 {
        hook.levels = logrus.AllLevels
    }
    return &hook
}

// Levels implement applied hook to which levels
func (h *CallerHook) Levels() []logrus.Level {
    return logrus.AllLevels
}

// Fire logs the information of context (filename and line)
func (h *CallerHook) Fire(entry *logrus.Entry) error {
    entry.Data[h.Field] = findCaller(h.Skip)
    return nil
}

// findCaller ignores the caller relevant to logrus or fslog then find out the exact caller
func findCaller(skip int) string {
    file := ""
    line := 0
    for i := range 10 {
        file, line = getCaller(skip + i)
        if !strings.HasPrefix(file, "logrus") && !strings.Contains(file, "log.go") {
            break
        }
    }
    return fmt.Sprintf("%s:%d", file, line)
}

func getCaller(skip int) (string, int) {
    _, file, line, ok := runtime.Caller(skip)
    // fmt.Println(file,":",line)
    if !ok {
        return "", 0
    }
    n := 0
    for i := len(file) - 1; i > 0; i-- {
        if file[i] == '/' {
            n++
            if n >= 2 {
                file = file[i+1:]
                break
            }
        }
    }
    return file, line
}
15
fs/log/event_log.go
Normal file
15
fs/log/event_log.go
Normal file
@@ -0,0 +1,15 @@
// Windows event logging stubs for non windows machines

//go:build !windows

package log

import (
    "fmt"
    "runtime"
)

// Starts windows event log if configured.
func startWindowsEventLog(*OutputHandler) error {
    return fmt.Errorf("windows event log not supported on %s platform", runtime.GOOS)
}
79
fs/log/event_log_windows.go
Normal file
79
fs/log/event_log_windows.go
Normal file
@@ -0,0 +1,79 @@
// Windows event logging

//go:build windows

package log

import (
    "fmt"
    "log/slog"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/atexit"
    "golang.org/x/sys/windows"
    "golang.org/x/sys/windows/svc/eventlog"
)

const (
    errorID    = uint32(windows.ERROR_INTERNAL_ERROR)
    infoID     = uint32(windows.ERROR_SUCCESS)
    sourceName = "rclone"
)

var (
    windowsEventLog *eventlog.Log
)

func startWindowsEventLog(handler *OutputHandler) error {
    // Don't install Windows event log if it is disabled.
    if Opt.WindowsEventLogLevel == fs.LogLevelOff {
        return nil
    }

    // Install the event source - we don't care if this fails as Windows has sensible fallbacks.
    _ = eventlog.InstallAsEventCreate(sourceName, eventlog.Info|eventlog.Warning|eventlog.Error)

    // Open the event log
    // If sourceName didn't get registered then Windows will use "Application" instead which is fine.
    // Though in my tests it seemed to use sourceName regardless.
    elog, err := eventlog.Open(sourceName)
    if err != nil {
        return fmt.Errorf("open event log: %w", err)
    }

    // Set the global for the handler
    windowsEventLog = elog

    // Close it on exit
    atexit.Register(func() {
        err := elog.Close()
        if err != nil {
            fs.Errorf(nil, "Failed to close Windows event log: %v", err)
        }
    })

    // Add additional JSON logging to the eventLog handler.
    handler.AddOutput(true, eventLog)

    fs.Infof(nil, "Logging to Windows event log at level %v", Opt.WindowsEventLogLevel)
    return nil
}

// We use levels ERROR, NOTICE, INFO, DEBUG
// Need to map to ERROR, WARNING, INFO
func eventLog(level slog.Level, text string) {
    // Check to see if this level is required
    if level < fs.LogLevelToSlog(Opt.WindowsEventLogLevel) {
        return
    }

    // Now log to windows eventLog
    switch level {
    case fs.SlogLevelEmergency, fs.SlogLevelAlert, fs.SlogLevelCritical, slog.LevelError:
        _ = windowsEventLog.Error(errorID, text)
    case slog.LevelWarn:
        _ = windowsEventLog.Warning(infoID, text)
    case fs.SlogLevelNotice, slog.LevelInfo, slog.LevelDebug:
        _ = windowsEventLog.Info(infoID, text)
    }
}
128
fs/log/log.go
128
fs/log/log.go
@@ -3,15 +3,14 @@ package log

import (
    "context"
    "fmt"
    "io"
    "log"
    "os"
    "reflect"
    "runtime"
    "strings"

    "github.com/rclone/rclone/fs"
    "github.com/sirupsen/logrus"
)

// OptionsInfo describes the Options in use
@@ -22,7 +21,7 @@ var OptionsInfo = fs.Options{{
    Groups: "Logging",
}, {
    Name:    "log_format",
    Default: "date,time",
    Default: logFormatDate | logFormatTime,
    Help:    "Comma separated list of log format options",
    Groups:  "Logging",
}, {
@@ -40,15 +39,27 @@ var OptionsInfo = fs.Options{{
    Default: false,
    Help:    "Activate systemd integration for the logger",
    Groups:  "Logging",
}, {
    Name:    "windows_event_log_level",
    Default: fs.LogLevelOff,
    Help:    "Windows Event Log level DEBUG|INFO|NOTICE|ERROR|OFF",
    Groups:  "Logging",
    Hide: func() fs.OptionVisibility {
        if runtime.GOOS == "windows" {
            return 0
        }
        return fs.OptionHideBoth
    }(),
}}

// Options contains options for controlling the logging
type Options struct {
    File              string `config:"log_file"`        // Log everything to this file
    Format            string `config:"log_format"`      // Comma separated list of log format options
    UseSyslog         bool   `config:"syslog"`          // Use Syslog for logging
    SyslogFacility    string `config:"syslog_facility"` // Facility for syslog, e.g. KERN,USER,...
    LogSystemdSupport bool   `config:"log_systemd"`     // set if using systemd logging
    File                 string      `config:"log_file"`        // Log everything to this file
    Format               logFormat   `config:"log_format"`      // Comma separated list of log format options
    UseSyslog            bool        `config:"syslog"`          // Use Syslog for logging
    SyslogFacility       string      `config:"syslog_facility"` // Facility for syslog, e.g. KERN,USER,...
    LogSystemdSupport    bool        `config:"log_systemd"`     // set if using systemd logging
    WindowsEventLogLevel fs.LogLevel `config:"windows_event_log_level"`
}

func init() {
@@ -58,6 +69,37 @@ func init() {
// Opt is the options for the logger
var Opt Options

// enum for the log format
type logFormat = fs.Bits[logFormatChoices]

const (
    logFormatDate logFormat = 1 << iota
    logFormatTime
    logFormatMicroseconds
    logFormatUTC
    logFormatLongFile
    logFormatShortFile
    logFormatPid
    logFormatNoLevel
    logFormatJSON
)

type logFormatChoices struct{}

func (logFormatChoices) Choices() []fs.BitsChoicesInfo {
    return []fs.BitsChoicesInfo{
        {Bit: uint64(logFormatDate), Name: "date"},
        {Bit: uint64(logFormatTime), Name: "time"},
        {Bit: uint64(logFormatMicroseconds), Name: "microseconds"},
        {Bit: uint64(logFormatUTC), Name: "UTC"},
        {Bit: uint64(logFormatLongFile), Name: "longfile"},
        {Bit: uint64(logFormatShortFile), Name: "shortfile"},
        {Bit: uint64(logFormatPid), Name: "pid"},
        {Bit: uint64(logFormatNoLevel), Name: "nolevel"},
        {Bit: uint64(logFormatJSON), Name: "json"},
    }
}

// fnName returns the name of the calling +2 function
func fnName() string {
    pc, _, _, ok := runtime.Caller(2)
@@ -114,31 +156,29 @@ func Stack(o any, info string) {
    fs.LogPrintf(fs.LogLevelDebug, o, "%s\nStack trace:\n%s", info, buf)
}

// This is called from fs when the config is reloaded
//
// The config should really be here but we can't move it as it is
// externally visible in the rc.
func logReload(ci *fs.ConfigInfo) error {
    Handler.SetLevel(fs.LogLevelToSlog(ci.LogLevel))

    if Opt.WindowsEventLogLevel != fs.LogLevelOff && Opt.WindowsEventLogLevel > ci.LogLevel {
        return fmt.Errorf("--windows-event-log-level %q must be >= --log-level %q", Opt.WindowsEventLogLevel, ci.LogLevel)
    }

    return nil
}

func init() {
    fs.LogReload = logReload
}

// InitLogging starts the logging as per the command line flags
func InitLogging() {
    flagsStr := "," + Opt.Format + ","
    var flags int
    if strings.Contains(flagsStr, ",date,") {
        flags |= log.Ldate
    }
    if strings.Contains(flagsStr, ",time,") {
        flags |= log.Ltime
    }
    if strings.Contains(flagsStr, ",microseconds,") {
        flags |= log.Lmicroseconds
    }
    if strings.Contains(flagsStr, ",UTC,") {
        flags |= log.LUTC
    }
    if strings.Contains(flagsStr, ",longfile,") {
        flags |= log.Llongfile
    }
    if strings.Contains(flagsStr, ",shortfile,") {
        flags |= log.Lshortfile
    }
    log.SetFlags(flags)

    fs.LogPrintPid = strings.Contains(flagsStr, ",pid,")
    // Note that ci only has the defaults in at this point
    // We set real values in logReload
    ci := fs.GetConfig(context.Background())

    // Log file output
    if Opt.File != "" {
@@ -150,17 +190,27 @@ func InitLogging() {
        if err != nil {
            fs.Errorf(nil, "Failed to seek log file to end: %v", err)
        }
        log.SetOutput(f)
        logrus.SetOutput(f)
        redirectStderr(f)
        Handler.setWriter(f)
    }

    // --use-json-log implies JSON formatting
    if ci.UseJSONLog {
        Opt.Format |= logFormatJSON
    }

    // Set slog level to initial log level
    Handler.SetLevel(fs.LogLevelToSlog(fs.InitialLogLevel()))

    // Set the format to the configured format
    Handler.setFormat(Opt.Format)

    // Syslog output
    if Opt.UseSyslog {
        if Opt.File != "" {
            fs.Fatalf(nil, "Can't use --syslog and --log-file together")
        }
        startSysLog()
        startSysLog(Handler)
    }

    // Activate systemd logger support if systemd invocation ID is
@@ -173,7 +223,15 @@ func InitLogging() {

    // Systemd logging output
    if Opt.LogSystemdSupport {
        startSystemdLog()
        startSystemdLog(Handler)
    }

    // Windows event logging
    if Opt.WindowsEventLogLevel != fs.LogLevelOff {
        err := startWindowsEventLog(Handler)
        if err != nil {
            fs.Fatalf(nil, "Failed to start windows event log: %v", err)
        }
    }
}

391
fs/log/slog.go
Normal file
391
fs/log/slog.go
Normal file
@@ -0,0 +1,391 @@
// Interfaces for the slog package

package log

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "log/slog"
    "os"
    "runtime"
    "strings"
    "sync"
    "time"

    "github.com/rclone/rclone/fs"
)

// Handler is the standard handler for the logging.
var Handler = defaultHandler()

// Create the default OutputHandler
//
// This logs to stderr with standard go logger format at level INFO.
//
// This will be adjusted by InitLogging to be the configured levels
// but it is important we have a logger running regardless of whether
// InitLogging has been called yet or not.
func defaultHandler() *OutputHandler {
    // Default options for default handler
    var opts = &slog.HandlerOptions{
        Level: fs.LogLevelToSlog(fs.InitialLogLevel()),
    }

    // Create our handler
    h := NewOutputHandler(os.Stderr, opts, logFormatDate|logFormatTime)

    // Set the slog default handler
    slog.SetDefault(slog.New(h))

    // Make log.Printf logs at level Notice
    slog.SetLogLoggerLevel(fs.SlogLevelNotice)

    return h
}

// Map slog level names to string
var slogNames = map[slog.Level]string{
    slog.LevelDebug:       "DEBUG",
    slog.LevelInfo:        "INFO",
    fs.SlogLevelNotice:    "NOTICE",
    slog.LevelWarn:        "WARNING",
    slog.LevelError:       "ERROR",
    fs.SlogLevelCritical:  "CRITICAL",
    fs.SlogLevelAlert:     "ALERT",
    fs.SlogLevelEmergency: "EMERGENCY",
}

// Convert a slog level to string using rclone's extra levels
func slogLevelToString(level slog.Level) string {
    levelStr := slogNames[level]
    if levelStr == "" {
        levelStr = level.String()
    }
    return levelStr
}

// ReplaceAttr function to customize the level key's string value in logs
func mapLogLevelNames(groups []string, a slog.Attr) slog.Attr {
    if a.Key == slog.LevelKey {
        level, ok := a.Value.Any().(slog.Level)
        if !ok {
            return a
        }
        levelStr := strings.ToLower(slogLevelToString(level))
        a.Value = slog.StringValue(levelStr)
    }
    return a
}

// get the file and line number of the caller skipping skip levels
func getCaller(skip int) string {
    var pc [64]uintptr
    n := runtime.Callers(skip, pc[:])
    if n == 0 {
        return ""
    }
    frames := runtime.CallersFrames(pc[:n])
    var more = true
    var frame runtime.Frame
    for more {
        frame, more = frames.Next()

        file := frame.File
        if strings.Contains(file, "/log/") || strings.HasSuffix(file, "log.go") {
            continue
        }
        line := frame.Line

        // shorten file name
        n := 0
        for i := len(file) - 1; i > 0; i-- {
            if file[i] == '/' {
                n++
                if n >= 2 {
                    file = file[i+1:]
                    break
                }
            }
        }
        return fmt.Sprintf("%s:%d", file, line)
    }
    return ""
}

// OutputHandler is a slog.Handler that writes log records in a format
// identical to the standard library's `log` package (e.g., "YYYY/MM/DD HH:MM:SS message").
//
// It can also write logs in JSON format identical to logrus.
type OutputHandler struct {
    opts        slog.HandlerOptions
    levelVar    slog.LevelVar
    writer      io.Writer
    mu          sync.Mutex
    output      []outputFn    // log to writer if empty or the last item
    outputExtra []outputExtra // log to all these additional places
    format      logFormat
    jsonBuf     bytes.Buffer
    jsonHandler *slog.JSONHandler
}

// Records the type and function pointer for extra logging output.
type outputExtra struct {
    json   bool
    output outputFn
}

// Define the type of the override logger
type outputFn func(level slog.Level, text string)

// NewOutputHandler creates a new OutputHandler with the specified flags.
//
// This is designed to use log/slog but produce output which is
// backwards compatible with previous rclone versions.
//
// If opts is nil, default options are used, with Level set to
// slog.LevelInfo.
func NewOutputHandler(out io.Writer, opts *slog.HandlerOptions, format logFormat) *OutputHandler {
    h := &OutputHandler{
        writer: out,
        format: format,
    }
    if opts != nil {
        h.opts = *opts
    }
    if h.opts.Level == nil {
        h.opts.Level = slog.LevelInfo
    }
    // Set the level var with the configured level
    h.levelVar.Set(h.opts.Level.Level())
    // And use it from now on
    h.opts.Level = &h.levelVar

    // Create the JSON logger in case we need it
    jsonOpts := slog.HandlerOptions{
        Level:       h.opts.Level,
        ReplaceAttr: mapLogLevelNames,
    }
    h.jsonHandler = slog.NewJSONHandler(&h.jsonBuf, &jsonOpts)
    return h
}
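A minimal usage sketch (illustrative only, written in the style of the in-package tests added below): construct a handler, hand it to slog, and log through it.

```go
// Sketch: in-package usage of NewOutputHandler, mirroring the tests below.
func ExampleNewOutputHandler() {
    var buf bytes.Buffer
    // nil opts defaults the level to INFO; format 0 omits date/time flags.
    h := NewOutputHandler(&buf, nil, 0)
    logger := slog.New(h)
    logger.Info("hello")
    fmt.Print(buf.String())
    // Output: INFO  : hello
}
```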

// SetOutput sets a new output handler for the log output.
//
// This is for temporarily overriding the output.
func (h *OutputHandler) SetOutput(fn outputFn) {
    h.output = append(h.output, fn)
}

// ResetOutput resets the log output to what it was.
func (h *OutputHandler) ResetOutput() {
    if len(h.output) > 0 {
        h.output = h.output[:len(h.output)-1]
    }
}

// AddOutput adds an additional logging destination of the type specified.
func (h *OutputHandler) AddOutput(json bool, fn outputFn) {
    h.outputExtra = append(h.outputExtra, outputExtra{
        json:   json,
        output: fn,
    })
}

// SetLevel sets a new log level, returning the old one.
func (h *OutputHandler) SetLevel(level slog.Level) slog.Level {
    oldLevel := h.levelVar.Level()
    h.levelVar.Set(level)
    return oldLevel
}

// Set the writer for the log to that passed.
func (h *OutputHandler) setWriter(writer io.Writer) {
    h.writer = writer
}

// Set the format flags to that passed in.
func (h *OutputHandler) setFormat(format logFormat) {
    h.format = format
}

// clear format flags that this output type doesn't want
func (h *OutputHandler) clearFormatFlags(bitMask logFormat) {
    h.format &^= bitMask
}

// set format flags that this output type requires
func (h *OutputHandler) setFormatFlags(bitMask logFormat) {
    h.format |= bitMask
}

// Enabled returns whether this logger is enabled for this level.
func (h *OutputHandler) Enabled(_ context.Context, level slog.Level) bool {
    minLevel := slog.LevelInfo
    if h.opts.Level != nil {
        minLevel = h.opts.Level.Level()
    }
    return level >= minLevel
}

// Create a log header in Go standard log format.
func (h *OutputHandler) formatStdLogHeader(buf *bytes.Buffer, level slog.Level, t time.Time, object string, lineInfo string) {
    // Add time in Go standard format if requested
    if h.format&(logFormatDate|logFormatTime|logFormatMicroseconds) != 0 {
        if h.format&logFormatUTC != 0 {
            t = t.UTC()
        }
        if h.format&logFormatDate != 0 {
            year, month, day := t.Date()
            fmt.Fprintf(buf, "%04d/%02d/%02d ", year, month, day)
        }
        if h.format&(logFormatTime|logFormatMicroseconds) != 0 {
            hour, min, sec := t.Clock()
            fmt.Fprintf(buf, "%02d:%02d:%02d", hour, min, sec)
            if h.format&logFormatMicroseconds != 0 {
                fmt.Fprintf(buf, ".%06d", t.Nanosecond()/1e3)
            }
            buf.WriteByte(' ')
        }
    }
    // Add source code filename:line if requested
    if h.format&(logFormatShortFile|logFormatLongFile) != 0 && lineInfo != "" {
        buf.WriteString(lineInfo)
        buf.WriteByte(':')
        buf.WriteByte(' ')
    }
    // Add PID if requested
    if h.format&logFormatPid != 0 {
        fmt.Fprintf(buf, "[%d] ", os.Getpid())
    }
    // Add log level if required
    if h.format&logFormatNoLevel == 0 {
        levelStr := slogLevelToString(level)
        fmt.Fprintf(buf, "%-6s: ", levelStr)
    }
    // Add object if passed
    if object != "" {
        buf.WriteString(object)
        buf.WriteByte(':')
        buf.WriteByte(' ')
    }
}

// Create a log in standard Go log format into buf.
func (h *OutputHandler) textLog(ctx context.Context, buf *bytes.Buffer, r slog.Record) error {
    var lineInfo string
    if h.format&(logFormatShortFile|logFormatLongFile) != 0 {
        lineInfo = getCaller(2)
    }

    var object string
    r.Attrs(func(attr slog.Attr) bool {
        if attr.Key == "object" {
            object = attr.Value.String()
            return false
        }
        return true
    })

    h.formatStdLogHeader(buf, r.Level, r.Time, object, lineInfo)
    buf.WriteString(r.Message)
    if buf.Len() == 0 || buf.Bytes()[buf.Len()-1] != '\n' { // Ensure newline
        buf.WriteByte('\n')
    }
    return nil
}

// Create a log in JSON format into buf.
func (h *OutputHandler) jsonLog(ctx context.Context, buf *bytes.Buffer, r slog.Record) (err error) {
    // Call the JSON handler to create the JSON in buf
    r.AddAttrs(
        slog.String("source", getCaller(2)),
    )
    h.mu.Lock()
    err = h.jsonHandler.Handle(ctx, r)
    if err == nil {
        _, err = h.jsonBuf.WriteTo(buf)
    }
    h.mu.Unlock()
    return err
}

// Handle outputs a log in the current format
func (h *OutputHandler) Handle(ctx context.Context, r slog.Record) (err error) {
    var (
        bufJSON *bytes.Buffer
        bufText *bytes.Buffer
        buf     *bytes.Buffer
    )

    // Check whether we need to build Text or JSON logs or both
    needJSON := h.format&logFormatJSON != 0
    needText := !needJSON
    for _, out := range h.outputExtra {
        if out.json {
            needJSON = true
        } else {
            needText = true
        }
    }

    if needJSON {
        var bufJSONBack [256]byte
        bufJSON = bytes.NewBuffer(bufJSONBack[:0])
        err = h.jsonLog(ctx, bufJSON, r)
        if err != nil {
            return err
        }
    }

    if needText {
        var bufTextBack [256]byte
        bufText = bytes.NewBuffer(bufTextBack[:0])
        err = h.textLog(ctx, bufText, r)
        if err != nil {
            return err
        }
    }

    h.mu.Lock()
    defer h.mu.Unlock()

    // Do the log, either to the default destination or to the alternate logging system
    if h.format&logFormatJSON != 0 {
        buf = bufJSON
    } else {
        buf = bufText
    }
    if len(h.output) > 0 {
        h.output[len(h.output)-1](r.Level, buf.String())
        err = nil
    } else {
        _, err = h.writer.Write(buf.Bytes())
    }

    // Log to any additional destinations required
    for _, out := range h.outputExtra {
        if out.json {
            out.output(r.Level, bufJSON.String())
        } else {
            out.output(r.Level, bufText.String())
        }
    }
    return err
}

// WithAttrs creates a new handler with the same writer, options, and flags.
// Attributes are ignored for the output format of this specific handler.
func (h *OutputHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
    return NewOutputHandler(h.writer, &h.opts, h.format)
}

// WithGroup creates a new handler with the same writer, options, and flags.
// Groups are ignored for the output format of this specific handler.
func (h *OutputHandler) WithGroup(name string) slog.Handler {
    return NewOutputHandler(h.writer, &h.opts, h.format)
}

// Check interface
var _ slog.Handler = (*OutputHandler)(nil)

264
fs/log/slog_test.go
Normal file
264
fs/log/slog_test.go
Normal file
@@ -0,0 +1,264 @@
package log

import (
    "bytes"
    "context"
    "fmt"
    "os"
    "regexp"
    "strings"
    "testing"
    "time"

    "log/slog"

    "github.com/rclone/rclone/fs"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

var (
    utcPlusOne = time.FixedZone("UTC+1", 1*60*60)
    t0         = time.Date(2020, 1, 2, 3, 4, 5, 123456000, utcPlusOne)
)

// Test slogLevelToString covers all mapped levels and an unknown level.
func TestSlogLevelToString(t *testing.T) {
    tests := []struct {
        level slog.Level
        want  string
    }{
        {slog.LevelDebug, "DEBUG"},
        {slog.LevelInfo, "INFO"},
        {fs.SlogLevelNotice, "NOTICE"},
        {slog.LevelWarn, "WARNING"},
        {slog.LevelError, "ERROR"},
        {fs.SlogLevelCritical, "CRITICAL"},
        {fs.SlogLevelAlert, "ALERT"},
        {fs.SlogLevelEmergency, "EMERGENCY"},
        // Unknown level should fall back to .String()
        {slog.Level(1234), slog.Level(1234).String()},
    }
    for _, tc := range tests {
        got := slogLevelToString(tc.level)
        assert.Equal(t, tc.want, got)
    }
}

// Test mapLogLevelNames replaces only the LevelKey attr and lowercases it.
func TestMapLogLevelNames(t *testing.T) {
    a := slog.Any(slog.LevelKey, slog.LevelWarn)
    mapped := mapLogLevelNames(nil, a)
    val, ok := mapped.Value.Any().(string)
    if !ok || val != "warning" {
        t.Errorf("mapLogLevelNames did not lowercase level: got %v", mapped.Value.Any())
    }
    // non-level attr should remain unchanged
    other := slog.String("foo", "bar")
    out := mapLogLevelNames(nil, other)
    assert.Equal(t, out.Value, other.Value, "mapLogLevelNames changed a non-level attr")
}

// Test getCaller returns a file:line string of the correct form.
func TestGetCaller(t *testing.T) {
    out := getCaller(0)
    assert.NotEqual(t, "", out)
    match := regexp.MustCompile(`^([^:]+):(\d+)$`).FindStringSubmatch(out)
    assert.NotNil(t, match)
    // Can't test this as it skips the /log/ directory!
    // assert.Equal(t, "slog_test.go", match[1])
}

// Test formatStdLogHeader for various flag combinations.
func TestFormatStdLogHeader(t *testing.T) {
    cases := []struct {
        name       string
        format     logFormat
        lineInfo   string
        object     string
        wantPrefix string
    }{
        {"dateTime", logFormatDate | logFormatTime, "", "", "2020/01/02 03:04:05 "},
        {"time", logFormatTime, "", "", "03:04:05 "},
        {"date", logFormatDate, "", "", "2020/01/02 "},
        {"dateTimeUTC", logFormatDate | logFormatTime | logFormatUTC, "", "", "2020/01/02 02:04:05 "},
        {"dateTimeMicro", logFormatDate | logFormatTime | logFormatMicroseconds, "", "", "2020/01/02 03:04:05.123456 "},
        {"micro", logFormatMicroseconds, "", "", "03:04:05.123456 "},
        {"shortFile", logFormatShortFile, "foo.go:10", "03:04:05 ", "foo.go:10: "},
        {"longFile", logFormatLongFile, "foo.go:10", "03:04:05 ", "foo.go:10: "},
        {"timePID", logFormatPid, "", "", fmt.Sprintf("[%d] ", os.Getpid())},
        {"levelObject", 0, "", "myobj", "INFO  : myobj: "},
    }
    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            h := &OutputHandler{format: tc.format}
            buf := &bytes.Buffer{}
            h.formatStdLogHeader(buf, slog.LevelInfo, t0, tc.object, tc.lineInfo)
            if !strings.HasPrefix(buf.String(), tc.wantPrefix) {
                t.Errorf("%s: got %q; want prefix %q", tc.name, buf.String(), tc.wantPrefix)
            }
        })
    }
}

// Test Enabled honors the HandlerOptions.Level.
func TestEnabled(t *testing.T) {
    h := NewOutputHandler(&bytes.Buffer{}, nil, 0)
    assert.True(t, h.Enabled(context.Background(), slog.LevelInfo))
    assert.False(t, h.Enabled(context.Background(), slog.LevelDebug))

    opts := &slog.HandlerOptions{Level: slog.LevelDebug}
    h2 := NewOutputHandler(&bytes.Buffer{}, opts, 0)
    assert.True(t, h2.Enabled(context.Background(), slog.LevelDebug))
}

// Test clearFormatFlags and setFormatFlags bitwise ops.
func TestClearSetFormatFlags(t *testing.T) {
    h := &OutputHandler{format: logFormatDate | logFormatTime}

    h.clearFormatFlags(logFormatTime)
    assert.True(t, h.format&logFormatTime == 0)

    h.setFormatFlags(logFormatMicroseconds)
    assert.True(t, h.format&logFormatMicroseconds != 0)
}

// Test SetOutput and ResetOutput override the default writer.
func TestSetResetOutput(t *testing.T) {
    buf := &bytes.Buffer{}
    h := NewOutputHandler(buf, nil, 0)
    var gotOverride string
    out := func(_ slog.Level, txt string) {
        gotOverride = txt
    }

    h.SetOutput(out)
    r := slog.NewRecord(t0, slog.LevelInfo, "hello", 0)
    require.NoError(t, h.Handle(context.Background(), r))
    assert.NotEqual(t, "", gotOverride)
    require.Equal(t, "", buf.String())

    h.ResetOutput()
    require.NoError(t, h.Handle(context.Background(), r))
    require.NotEqual(t, "", buf.String())
}

// Test AddOutput sends to extra destinations.
func TestAddOutput(t *testing.T) {
    buf := &bytes.Buffer{}
    h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime)
    var extraText string
    out := func(_ slog.Level, txt string) {
        extraText = txt
    }

    h.AddOutput(false, out)

    r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
    require.NoError(t, h.Handle(context.Background(), r))
    assert.Equal(t, "2020/01/02 03:04:05 INFO  : world\n", buf.String())
    assert.Equal(t, "2020/01/02 03:04:05 INFO  : world\n", extraText)
}

// Test AddOutputJSON sends JSON to extra destinations.
func TestAddOutputJSON(t *testing.T) {
    buf := &bytes.Buffer{}
    h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime)
    var extraText string
    out := func(_ slog.Level, txt string) {
        extraText = txt
    }

    h.AddOutput(true, out)

    r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
    require.NoError(t, h.Handle(context.Background(), r))
    assert.NotEqual(t, "", extraText)
    assert.Equal(t, "2020/01/02 03:04:05 INFO  : world\n", buf.String())
    assert.True(t, strings.HasPrefix(extraText, `{"time":"2020-01-02T03:04:05.123456+01:00","level":"info","msg":"world","source":"`))
    assert.True(t, strings.HasSuffix(extraText, "\"}\n"))
}

// Test AddOutputUseJSONLog sends text to extra destinations.
func TestAddOutputUseJSONLog(t *testing.T) {
    buf := &bytes.Buffer{}
    h := NewOutputHandler(buf, nil, logFormatDate|logFormatTime|logFormatJSON)
    var extraText string
    out := func(_ slog.Level, txt string) {
        extraText = txt
    }

    h.AddOutput(false, out)

    r := slog.NewRecord(t0, slog.LevelInfo, "world", 0)
    require.NoError(t, h.Handle(context.Background(), r))
    assert.NotEqual(t, "", extraText)
    assert.True(t, strings.HasPrefix(buf.String(), `{"time":"2020-01-02T03:04:05.123456+01:00","level":"info","msg":"world","source":"`))
    assert.True(t, strings.HasSuffix(buf.String(), "\"}\n"))
    assert.Equal(t, "2020/01/02 03:04:05 INFO  : world\n", extraText)
}

// Test WithAttrs and WithGroup return new handlers with same settings.
func TestWithAttrsAndGroup(t *testing.T) {
    buf := &bytes.Buffer{}
    h := NewOutputHandler(buf, nil, logFormatDate)
    h2 := h.WithAttrs([]slog.Attr{slog.String("k", "v")})
    if _, ok := h2.(*OutputHandler); !ok {
        t.Error("WithAttrs returned wrong type")
    }
    h3 := h.WithGroup("grp")
    if _, ok := h3.(*OutputHandler); !ok {
        t.Error("WithGroup returned wrong type")
    }
}

// Test textLog and jsonLog directly for basic correctness.
func TestTextLogAndJsonLog(t *testing.T) {
    h := NewOutputHandler(&bytes.Buffer{}, nil, logFormatDate|logFormatTime)
    r := slog.NewRecord(t0, slog.LevelWarn, "msg!", 0)
    r.AddAttrs(slog.String("object", "obj"))

    // textLog
    bufText := &bytes.Buffer{}
    require.NoError(t, h.textLog(context.Background(), bufText, r))
    out := bufText.String()
    if !strings.Contains(out, "WARNING") || !strings.Contains(out, "obj:") || !strings.HasSuffix(out, "\n") {
        t.Errorf("textLog output = %q", out)
    }

    // jsonLog
    bufJSON := &bytes.Buffer{}
    require.NoError(t, h.jsonLog(context.Background(), bufJSON, r))
    j := bufJSON.String()
    if !strings.Contains(j, `"level":"warning"`) || !strings.Contains(j, `"msg":"msg!"`) {
        t.Errorf("jsonLog output = %q", j)
    }
}

// Table-driven test for JSON vs text Handle behavior.
func TestHandleFormatFlags(t *testing.T) {
    r := slog.NewRecord(t0, slog.LevelInfo, "hi", 0)
    cases := []struct {
        name     string
        format   logFormat
        wantJSON bool
    }{
        {"textMode", 0, false},
        {"jsonMode", logFormatJSON, true},
    }
    for _, tc := range cases {
        buf := &bytes.Buffer{}
        h := NewOutputHandler(buf, nil, tc.format)
        require.NoError(t, h.Handle(context.Background(), r))
        out := buf.String()
        if tc.wantJSON {
            if !strings.HasPrefix(out, "{") || !strings.Contains(out, `"level":"info"`) {
                t.Errorf("%s: got %q; want JSON", tc.name, out)
            }
        } else {
            if !strings.Contains(out, "INFO") {
                t.Errorf("%s: got %q; want text INFO", tc.name, out)
            }
        }
    }
}
@@ -11,7 +11,7 @@ import (
)

// Starts syslog if configured, returns true if it was started
func startSysLog() bool {
func startSysLog(handler *OutputHandler) bool {
    fs.Fatalf(nil, "--syslog not supported on %s platform", runtime.GOOS)
    return false
}

@@ -5,7 +5,7 @@
package log

import (
    "log"
    "log/slog"
    "log/syslog"
    "os"
    "path"
@@ -39,7 +39,7 @@ var (
)

// Starts syslog
func startSysLog() bool {
func startSysLog(handler *OutputHandler) bool {
    facility, ok := syslogFacilityMap[Opt.SyslogFacility]
    if !ok {
        fs.Fatalf(nil, "Unknown syslog facility %q - man syslog for list", Opt.SyslogFacility)
@@ -49,27 +49,27 @@ func startSysLog() bool {
    if err != nil {
        fs.Fatalf(nil, "Failed to start syslog: %v", err)
    }
    log.SetFlags(0)
    log.SetOutput(w)
    fs.LogOutput = func(level fs.LogLevel, text string) {
    handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid)
    handler.setFormatFlags(logFormatNoLevel)
    handler.SetOutput(func(level slog.Level, text string) {
        switch level {
        case fs.LogLevelEmergency:
        case fs.SlogLevelEmergency:
            _ = w.Emerg(text)
        case fs.LogLevelAlert:
        case fs.SlogLevelAlert:
            _ = w.Alert(text)
        case fs.LogLevelCritical:
        case fs.SlogLevelCritical:
            _ = w.Crit(text)
        case fs.LogLevelError:
        case slog.LevelError:
            _ = w.Err(text)
        case fs.LogLevelWarning:
        case slog.LevelWarn:
            _ = w.Warning(text)
        case fs.LogLevelNotice:
        case fs.SlogLevelNotice:
            _ = w.Notice(text)
        case fs.LogLevelInfo:
        case slog.LevelInfo:
            _ = w.Info(text)
        case fs.LogLevelDebug:
        case slog.LevelDebug:
            _ = w.Debug(text)
        }
    }
    })
    return true
}

@@ -11,7 +11,7 @@ import (
)

// Enables systemd logs if configured or if auto-detected
func startSystemdLog() bool {
func startSystemdLog(handler *OutputHandler) bool {
    fs.Fatalf(nil, "--log-systemd not supported on %s platform", runtime.GOOS)
    return false
}

@@ -7,54 +7,47 @@ package log
import (
    "fmt"
    "log"
    "log/slog"
    "strconv"
    "strings"

    "github.com/coreos/go-systemd/v22/journal"
    "github.com/rclone/rclone/fs"
)

// Enables systemd logs if configured or if auto-detected
func startSystemdLog() bool {
    flagsStr := "," + Opt.Format + ","
    var flags int
    if strings.Contains(flagsStr, ",longfile,") {
        flags |= log.Llongfile
    }
    if strings.Contains(flagsStr, ",shortfile,") {
        flags |= log.Lshortfile
    }
    log.SetFlags(flags)
func startSystemdLog(handler *OutputHandler) bool {
    handler.clearFormatFlags(logFormatDate | logFormatTime | logFormatMicroseconds | logFormatUTC | logFormatLongFile | logFormatShortFile | logFormatPid)
    handler.setFormatFlags(logFormatNoLevel)
    // TODO: Use the native journal.Print approach rather than a custom implementation
    fs.LogOutput = func(level fs.LogLevel, text string) {
    handler.SetOutput(func(level slog.Level, text string) {
        text = fmt.Sprintf("<%s>%-6s: %s", systemdLogPrefix(level), level, text)
        _ = log.Output(4, text)
    }
    })
    return true
}

var logLevelToSystemdPrefix = []journal.Priority{
    fs.LogLevelEmergency: journal.PriEmerg,
    fs.LogLevelAlert:     journal.PriAlert,
    fs.LogLevelCritical:  journal.PriCrit,
    fs.LogLevelError:     journal.PriErr,
    fs.LogLevelWarning:   journal.PriWarning,
    fs.LogLevelNotice:    journal.PriNotice,
    fs.LogLevelInfo:      journal.PriInfo,
    fs.LogLevelDebug:     journal.PriDebug,
var slogLevelToSystemdPrefix = map[slog.Level]journal.Priority{
    fs.SlogLevelEmergency: journal.PriEmerg,
    fs.SlogLevelAlert:     journal.PriAlert,
    fs.SlogLevelCritical:  journal.PriCrit,
    slog.LevelError:       journal.PriErr,
    slog.LevelWarn:        journal.PriWarning,
    fs.SlogLevelNotice:    journal.PriNotice,
    slog.LevelInfo:        journal.PriInfo,
    slog.LevelDebug:       journal.PriDebug,
}

func systemdLogPrefix(l fs.LogLevel) string {
    if l >= fs.LogLevel(len(logLevelToSystemdPrefix)) {
func systemdLogPrefix(l slog.Level) string {
    prio, ok := slogLevelToSystemdPrefix[l]
    if !ok {
        return ""
    }
    return strconv.Itoa(int(logLevelToSystemdPrefix[l]))
    return strconv.Itoa(int(prio))
}

func isJournalStream() bool {
    if usingJournald, _ := journal.StderrIsJournalStream(); usingJournald {
        return true
    }

    return false
}

@@ -6,12 +6,11 @@ import (
    "errors"
    "fmt"
    "io"
    "log"
    "os"
    "sort"
    "strings"
    "testing"

    "github.com/rclone/rclone/cmd/bisync/bilib"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/hash"
@@ -65,18 +64,16 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
    check := func(i int, wantErrors int64, wantChecks int64, oneway bool, wantOutput map[string]string) {
        t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
            accounting.GlobalStats().ResetCounters()
            var buf bytes.Buffer
            log.SetOutput(&buf)
            defer func() {
                log.SetOutput(os.Stderr)
            }()
            opt := operations.CheckOpt{
                Fdst:   r.Fremote,
                Fsrc:   r.Flocal,
                OneWay: oneway,
            }
            addBuffers(&opt)
            err := checkFunction(ctx, &opt)
            var err error
            buf := bilib.CaptureOutput(func() {
                err = checkFunction(ctx, &opt)
            })
            gotErrors := accounting.GlobalStats().GetErrors()
            gotChecks := accounting.GlobalStats().GetChecks()
            if wantErrors == 0 && err != nil {
@@ -88,7 +85,7 @@ func testCheck(t *testing.T, checkFunction func(ctx context.Context, opt *operat
            if wantErrors != gotErrors {
                t.Errorf("%d: Expecting %d errors but got %d", i, wantErrors, gotErrors)
            }
            if gotChecks > 0 && !strings.Contains(buf.String(), "matching files") {
            if gotChecks > 0 && !strings.Contains(string(buf), "matching files") {
                t.Errorf("%d: Total files matching line missing", i)
            }
            if wantChecks != gotChecks {
@@ -389,9 +386,6 @@ func testCheckSum(t *testing.T, download bool) {

    checkRun := func(runNo, wantChecks, wantErrors int, want wantType) {
        accounting.GlobalStats().ResetCounters()
        buf := new(bytes.Buffer)
        log.SetOutput(buf)
        defer log.SetOutput(os.Stderr)

        opt := operations.CheckOpt{
            Combined: new(bytes.Buffer),
@@ -401,8 +395,10 @@ func testCheckSum(t *testing.T, download bool) {
            MissingOnSrc: new(bytes.Buffer),
            MissingOnDst: new(bytes.Buffer),
        }
        err := operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download)

        var err error
        buf := bilib.CaptureOutput(func() {
            err = operations.CheckSum(ctx, dataFs, r.Fremote, sumFile, hashType, &opt, download)
        })
        gotErrors := int(accounting.GlobalStats().GetErrors())
        if wantErrors == 0 {
            assert.NoError(t, err, "unexpected error in run %d", runNo)
@@ -414,7 +410,7 @@ func testCheckSum(t *testing.T, download bool) {

        gotChecks := int(accounting.GlobalStats().GetChecks())
        if wantChecks > 0 || gotChecks > 0 {
            assert.Contains(t, buf.String(), "matching files", "missing matching files in run %d", runNo)
            assert.Contains(t, string(buf), "matching files", "missing matching files in run %d", runNo)
        }
        assert.Equal(t, wantChecks, gotChecks, "wrong number of checks in run %d", runNo)

@@ -399,6 +399,12 @@ backends:
    - TestIntegration/FsMkdir/FsEncoding/punctuation
    - TestIntegration/FsMkdir/FsEncoding/invalid_UTF-8
  fastlist: false
- backend: "webdav"
  remote: "TestWebdavInfiniteScale:"
  ignore:
    - TestIntegration/FsMkdir/FsEncoding/punctuation
    - TestIntegration/FsMkdir/FsEncoding/invalid_UTF-8
  fastlist: false
- backend: "webdav"
  remote: "TestWebdavRclone:"
  ignore:

49
fstest/testserver/init.d/TestWebdavInfiniteScale
Executable file
49
fstest/testserver/init.d/TestWebdavInfiniteScale
Executable file
@@ -0,0 +1,49 @@
#!/usr/bin/env bash

set -e

NAME=infinitescale
USER=admin
PASS=admin
PORT=9200

. $(dirname "$0")/docker.bash

start() {

    docker run --rm --name $NAME \
        -v $(pwd):/etc/ocis \
        -e "OCIS_INSECURE=true" \
        -e "IDM_ADMIN_PASSWORD=$PASS" \
        -e "OCIS_FORCE_CONFIG_OVERWRITE=true" \
        -e "OCIS_URL=https://127.0.0.1:$PORT" \
        owncloud/ocis \
        init

    docker run --rm -d --name $NAME \
        -e "OCIS_LOG_LEVEL=debug" \
        -e "OCIS_LOG_PRETTY=true" \
        -e "OCIS_URL=https://127.0.0.1:$PORT" \
        -e "OCIS_ADMIN_USER_ID=some-admin-user-id-0000-100000000000" \
        -e "IDM_ADMIN_PASSWORD=$PASS" \
        -e "OCIS_INSECURE=true" \
        -e "PROXY_ENABLE_BASIC_AUTH=true" \
        -v $(pwd):/etc/ocis \
        -p 127.0.0.1:${PORT}:9200 \
        owncloud/ocis

    echo type=webdav
    echo url=https://127.0.0.1:${PORT}/dav/spaces/some-admin-user-id-0000-100000000000
    echo user=$USER
    echo pass=$(rclone obscure $PASS)
    echo vendor=infinitescale
    echo _connect=127.0.0.1:${PORT}
}

stop() {
    # Clean up the mess
    docker stop infinitescale
    rm -f ./ocis.yaml
}

. $(dirname "$0")/run.bash

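The echo lines at the end appear to follow the convention of the other scripts in fstest/testserver/init.d: the type=/url=/user=/pass=/vendor= pairs are consumed by the integration-test harness to synthesise the TestWebdavInfiniteScale remote, and _connect names the address it should wait on before running tests (how the harness consumes these pairs is inferred from the script, not shown in this diff).
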
16
go.mod
16
go.mod
@@ -66,7 +66,6 @@ require (
    github.com/rivo/uniseg v0.4.7
    github.com/rogpeppe/go-internal v1.14.1
    github.com/shirou/gopsutil/v4 v4.25.1
    github.com/sirupsen/logrus v1.9.3
    github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
    github.com/spf13/cobra v1.9.1
    github.com/spf13/pflag v1.0.6
@@ -81,12 +80,12 @@ require (
    github.com/zeebo/blake3 v0.2.4
    go.etcd.io/bbolt v1.4.0
    goftp.io/server/v2 v2.0.1
    golang.org/x/crypto v0.35.0
    golang.org/x/net v0.36.0
    golang.org/x/crypto v0.36.0
    golang.org/x/net v0.38.0
    golang.org/x/oauth2 v0.27.0
    golang.org/x/sync v0.11.0
    golang.org/x/sys v0.30.0
    golang.org/x/text v0.22.0
    golang.org/x/sync v0.12.0
    golang.org/x/sys v0.31.0
    golang.org/x/text v0.23.0
    golang.org/x/time v0.10.0
    google.golang.org/api v0.223.0
    gopkg.in/validator.v2 v2.0.1
@@ -138,7 +137,7 @@ require (
    github.com/cronokirby/saferith v0.33.0 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/dustin/go-humanize v1.0.1 // indirect
    github.com/ebitengine/purego v0.8.2 // indirect
    github.com/ebitengine/purego v0.8.3 // indirect
    github.com/emersion/go-message v0.18.0 // indirect
    github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
    github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
@@ -208,6 +207,7 @@ require (
    github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
    github.com/samber/lo v1.47.0 // indirect
    github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect
    github.com/sirupsen/logrus v1.9.3 // indirect
    github.com/sony/gobreaker v0.5.0 // indirect
    github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect
    github.com/tklauser/go-sysconf v0.3.13 // indirect
@@ -243,5 +243,5 @@ require (
    github.com/golang-jwt/jwt/v4 v4.5.2
    github.com/pkg/xattr v0.4.10
    golang.org/x/mobile v0.0.0-20250218173827-cd096645fcd3
    golang.org/x/term v0.29.0
    golang.org/x/term v0.30.0
)

28
go.sum
28
go.sum
@@ -219,8 +219,8 @@ github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq4
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/ebitengine/purego v0.8.3 h1:K+0AjQp63JEZTEMZiwsI9g0+hAMNohwUOtY0RPGexmc=
github.com/ebitengine/purego v0.8.3/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emersion/go-message v0.18.0 h1:7LxAXHRpSeoO/Wom3ZApVZYG7c3d17yCScYce8WiXA8=
github.com/emersion/go-message v0.18.0/go.mod h1:Zi69ACvzaoV/MBnrxfVBPV3xWEuCmC2nEN39oJF4B8A=
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 h1:IbFBtwoTQyw0fIM5xv1HF+Y+3ZijDR839WMulgxCcUY=
@@ -703,8 +703,8 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -789,8 +789,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -814,8 +814,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -870,8 +870,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -887,8 +887,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -904,8 +904,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

75
lib/proxy/http.go
Normal file
75
lib/proxy/http.go
Normal file
@@ -0,0 +1,75 @@
package proxy

import (
    "bufio"
    "crypto/tls"
    "fmt"
    "net"
    "net/http"
    "net/url"
    "strings"

    "golang.org/x/net/proxy"
)

// HTTPConnectDial connects using HTTP CONNECT via proxyDialer
//
// It will read the HTTP proxy address from the environment in the
// standard way.
//
// It optionally takes a proxyDialer to dial the HTTP proxy server.
// If nil is passed, it will use the default net.Dialer.
func HTTPConnectDial(network, addr string, proxyURL *url.URL, proxyDialer proxy.Dialer) (net.Conn, error) {
    if proxyDialer == nil {
        proxyDialer = &net.Dialer{}
    }
    if proxyURL == nil {
        return proxyDialer.Dial(network, addr)
    }

    // prepare proxy host with default ports
    host := proxyURL.Host
    if !strings.Contains(host, ":") {
        if strings.EqualFold(proxyURL.Scheme, "https") {
            host += ":443"
        } else {
            host += ":80"
        }
    }

    // connect to proxy
    conn, err := proxyDialer.Dial(network, host)
    if err != nil {
        return nil, fmt.Errorf("HTTP CONNECT proxy failed to Dial: %q", err)
    }

    // wrap TLS if HTTPS proxy
    if strings.EqualFold(proxyURL.Scheme, "https") {
        tlsConfig := &tls.Config{ServerName: proxyURL.Hostname()}
        tlsConn := tls.Client(conn, tlsConfig)
        if err := tlsConn.Handshake(); err != nil {
            _ = conn.Close()
            return nil, fmt.Errorf("HTTP CONNECT proxy failed to make TLS connection: %q", err)
        }
        conn = tlsConn
    }

    // send CONNECT
    _, err = fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
    if err != nil {
        _ = conn.Close()
        return nil, fmt.Errorf("HTTP CONNECT proxy failed to send CONNECT: %q", err)
    }
    br := bufio.NewReader(conn)
    req := &http.Request{URL: &url.URL{Scheme: "http", Host: addr}}
    resp, err := http.ReadResponse(br, req)
    if err != nil {
        _ = conn.Close()
        return nil, fmt.Errorf("HTTP CONNECT proxy failed to read response: %q", err)
    }
    if resp.StatusCode != http.StatusOK {
        _ = conn.Close()
        return nil, fmt.Errorf("HTTP CONNECT proxy failed: %s", resp.Status)
    }
    return conn, nil
}
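
A hedged usage sketch of the new dialer, assuming the package is importable as github.com/rclone/rclone/lib/proxy (as the file path suggests) and that a CONNECT-capable proxy listens on the hypothetical address 127.0.0.1:3128:

package main

import (
    "fmt"
    "log"
    "net/url"

    "github.com/rclone/rclone/lib/proxy"
)

func main() {
    // Hypothetical CONNECT proxy address; substitute a real one.
    proxyURL, err := url.Parse("http://127.0.0.1:3128")
    if err != nil {
        log.Fatal(err)
    }
    // Tunnel a TCP connection to example.com:443 through the proxy.
    // Passing nil for the dialer falls back to a plain net.Dialer,
    // per HTTPConnectDial above.
    conn, err := proxy.HTTPConnectDial("tcp", "example.com:443", proxyURL, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer func() { _ = conn.Close() }()
    fmt.Println("tunnel established to", conn.RemoteAddr())
}

Note that HTTPConnectDial only establishes the raw tunnel; a TLS target such as example.com:443 still needs its own handshake over the returned net.Conn.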