mirror of https://github.com/rclone/rclone.git synced 2026-01-29 07:43:45 +00:00

Compare commits


16 Commits

Author SHA1 Message Date
Nick Craig-Wood
1d53714aba config: increase input buffer size to 16k to fix onedrive oauth - fixes #5835 2021-11-23 17:51:53 +00:00
Nick Craig-Wood
f26abc89a6 union: fix treatment of remotes with // in
See: https://forum.rclone.org/t/connection-string-with-union-backend-and-a-lot-of-quotes/27577
2021-11-23 17:41:12 +00:00
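
The bug is easy to illustrate with the standard library: path.Join runs path.Clean on its result, which collapses the double slashes that can be significant inside an rclone remote string; the union diff below switches to fspath.JoinRootPath instead. A minimal stdlib-only sketch (not rclone code):

package main

import (
	"fmt"
	"path"
)

func main() {
	// path.Join cleans its result, so a double slash that is meaningful in
	// an rclone remote string is silently collapsed:
	fmt.Println(path.Join("remote:dir//sub", "leaf")) // prints "remote:dir/sub/leaf"
}
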
lindwurm
b5abbe819f s3: Add Wasabi AP Northeast 2 endpoint info
* Wasabi has started providing an AP Northeast 2 (Osaka) endpoint, so add it to the list
* Rename ap-northeast-1 from "AP Northeast" to "AP Northeast 1 (Tokyo)"

Signed-off-by: lindwurm <lindwurm.q@gmail.com>
2021-11-22 18:02:57 +00:00
Nick Craig-Wood
a351484997 sftp: fix timeout on hashing large files by sending keepalives
Before this fix, SFTP sessions could time out when doing hashes if
they took longer than the --timeout parameter.

This patch sends keepalive packets every minute while a shell command
is running to keep the connection open.

See: https://forum.rclone.org/t/rclone-check-over-sftp-failure-to-calculate-md5-hash-for-large-files/27487
2021-11-22 15:26:29 +00:00
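
As a standalone illustration of the pattern, the sketch below runs a command over an established *ssh.Client from golang.org/x/crypto/ssh while a goroutine sends keepalive requests on a ticker. The helper name RunWithKeepAlives is illustrative only; rclone's actual implementation is in the sftp diff further down.

package sshutil

import (
	"log"
	"time"

	"golang.org/x/crypto/ssh"
)

// RunWithKeepAlives runs cmd on an established client while sending an SSH
// keepalive request every interval, so a long-running command (for example
// "md5sum /path/to/big/file") is not killed by an idle timeout.
func RunWithKeepAlives(client *ssh.Client, cmd string, interval time.Duration) ([]byte, error) {
	done := make(chan struct{})
	defer close(done)

	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				// "keepalive@openssh.com" is the conventional request name;
				// errors are only interesting for debugging.
				if _, _, err := client.SendRequest("keepalive@openssh.com", true, nil); err != nil {
					log.Printf("keepalive failed: %v", err)
				}
			case <-done:
				return
			}
		}
	}()

	session, err := client.NewSession()
	if err != nil {
		return nil, err
	}
	defer session.Close()
	return session.Output(cmd)
}
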
Nick Craig-Wood
099eff8891 sftp: refactor so we only have one way of running remote commands
This also returns errors from running ssh hash commands, which we
didn't do before.
2021-11-22 15:26:29 +00:00
albertony
c4cb167d4a Add rsapkf and Will Holtz to contributors 2021-11-21 19:26:05 +01:00
Will Holtz
38e100ab19 docs/config: more explicit doc for config create --all with params 2021-11-21 19:22:19 +01:00
rsapkf
db95a0d6c3 docs/pcloud: fix typo 2021-11-21 19:16:19 +01:00
Nick Craig-Wood
df07964db3 azureblob: raise --azureblob-upload-concurrency to 16 by default
After speed testing it was discovered that upload speed goes up pretty
much linearly with upload concurrency. This patch changes the default
from 4 to 16, which means that rclone will use 16 * 4M = 64M per
transfer, which is acceptable even for low-memory devices.

This adds a note that performance may be increased by increasing
upload concurrency.

See: https://forum.rclone.org/t/performance-of-rclone-vs-azcopy/27437/9
2021-11-18 16:09:02 +00:00
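
For a rough sanity check of those figures (assuming the default --transfers of 4 and the default 4 MiB chunk size), the worst-case buffer memory is transfers * upload_concurrency * chunk_size:

package main

import "fmt"

func main() {
	const (
		chunkSize         = 4 << 20 // default --azureblob-chunk-size, 4 MiB
		uploadConcurrency = 16      // new default --azureblob-upload-concurrency
		transfers         = 4       // default --transfers
	)
	perTransfer := uploadConcurrency * chunkSize
	fmt.Printf("per transfer: %d MiB, worst case: %d MiB\n",
		perTransfer>>20, (transfers*perTransfer)>>20)
	// per transfer: 64 MiB, worst case: 256 MiB
}
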
Nick Craig-Wood
fbc4c4ad9a azureblob: remove 100MB upper limit on chunk_size as it is no longer needed 2021-11-18 16:09:02 +00:00
Nick Craig-Wood
4454b3e1ae azureblob: implement --azureblob-upload-concurrency parameter to speed uploads
See: https://forum.rclone.org/t/performance-of-rclone-vs-azcopy/27437
2021-11-18 16:08:57 +00:00
Nick Craig-Wood
f9321fccbb Add deinferno to contributors 2021-11-18 15:51:45 +00:00
Ole Frost
3c2252b7c0 fs/operations: add server-side moves to stats
Fixes #5430
2021-11-18 12:20:56 +00:00
Cnly
51c952654c fstests: treat accountUpgradeRequired as success for OneDrive PublicLink 2021-11-17 17:35:17 +00:00
deinferno
80e47be65f yandex: add permanent deletion support 2021-11-17 16:57:41 +00:00
Michał Matczuk
38dc3e93ee fshttp: add prometheus metrics for HTTP status code
This patch adds an rclone_http_status_code counter vector labeled by

* host,
* method,
* code.

It makes it possible to see HTTP errors, backoffs, etc.

The Metrics struct is designed for extensibility.
Adding new metrics is a matter of adding them to the Metrics struct and including them in the response handling.

This feature has been discussed in the forum [1].

[1] https://forum.rclone.org/t/prometheus-metrics/14484
2021-11-17 18:38:12 +03:00
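
A minimal sketch of how a program embedding rclone's fshttp package could expose the new counter; rclone's own rc server does the equivalent in its init() (see the rcserver diff below). The listen address is just an example.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rclone/rclone/fs/fshttp"
)

func main() {
	m := fshttp.NewMetrics("rclone")
	for _, c := range m.Collectors() {
		prometheus.MustRegister(c)
	}
	// Transports created from now on increment
	// rclone_http_status_code{host, method, code}.
	fshttp.DefaultMetrics = m

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":9090", nil) // example address
}
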
28 changed files with 219 additions and 105 deletions

View File

@@ -50,8 +50,6 @@ const (
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
defaultChunkSize = 4 * fs.Mebi
maxChunkSize = 100 * fs.Mebi
uploadConcurrency = 4
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
@@ -134,12 +132,33 @@ msi_client_id, or msi_mi_res_id parameters.`,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size (<= 100 MiB).
Help: `Upload chunk size.
Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.
In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: 16,
Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of blob list.
@@ -257,6 +276,7 @@ type Options struct {
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
ArchiveTierDelete bool `config:"archive_tier_delete"`
@@ -416,9 +436,6 @@ func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -1667,10 +1684,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: uploadConcurrency,
MaxBuffers: o.fs.opt.UploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
}
// Don't retry, return a retry error instead

View File

@@ -17,12 +17,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
})
}

View File

@@ -44,7 +44,6 @@ func TestIntegration(t *testing.T) {
"UserInfo",
"Disconnect",
},
QuickTestOK: true,
}
if *fstest.RemoteName == "" {
name := "TestChunker"

View File

@@ -16,9 +16,6 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
@@ -64,6 +61,5 @@ func TestRemoteGzip(t *testing.T) {
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
}

View File

@@ -1049,10 +1049,6 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
}
// if this is wrapping a local object then we work out the hash
if srcObj.Fs().Features().IsLocal {
if o.f.opt.NoDataEncryption {
// If no encryption, just return the hash of the underlying object
return srcObj.Hash(ctx, hash)
}
// Read the data and encrypt it to calculate the hash
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)

View File

@@ -77,11 +77,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
if f.opt.NoDataEncryption {
_, err = outBuf.WriteString(contents)
} else {
_, err = io.Copy(&outBuf, enc)
}
_, err = io.Copy(&outBuf, enc)
require.NoError(t, err)
var oi fs.ObjectInfo = obj
@@ -100,12 +96,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
var wantHash [md5.Size]byte
if f.opt.NoDataEncryption {
wantHash = md5.Sum([]byte(contents))
} else {
wantHash = md5.Sum(outBuf.Bytes())
}
wantHash := md5.Sum(outBuf.Bytes())
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)

View File

@@ -46,7 +46,6 @@ func TestStandardBase32(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -68,7 +67,6 @@ func TestStandardBase64(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -90,7 +88,6 @@ func TestStandardBase32768(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -112,7 +109,6 @@ func TestOff(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -135,7 +131,6 @@ func TestObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -159,6 +154,5 @@ func TestNoDataObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

View File

@@ -25,7 +25,6 @@ func TestIntegration(t *testing.T) {
"OpenWriterAt",
},
UnimplementableObjectMethods: []string{},
QuickTestOK: true,
}
if *fstest.RemoteName == "" {
tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")

View File

@@ -11,8 +11,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
QuickTestOK: true,
RemoteName: "",
NilObject: (*local.Object)(nil),
})
}

View File

@@ -10,8 +10,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: ":memory:",
NilObject: (*Object)(nil),
QuickTestOK: true,
RemoteName: ":memory:",
NilObject: (*Object)(nil),
})
}

View File

@@ -761,7 +761,11 @@ func init() {
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
Help: "Wasabi AP Northeast endpoint",
Help: "Wasabi AP Northeast 1 (Tokyo) endpoint",
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-2.wasabisys.com",
Help: "Wasabi AP Northeast 2 (Osaka) endpoint",
Provider: "Wasabi",
}},
}, {

View File

@@ -42,7 +42,8 @@ const (
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
decayConstant = 2 // bigger for slower decay, exponential
keepAliveInterval = time.Minute // send keepalives every this long while running commands
)
var (
@@ -339,6 +340,32 @@ func (c *conn) wait() {
c.err <- c.sshClient.Conn.Wait()
}
// Send a keepalive over the ssh connection
func (c *conn) sendKeepAlive() {
_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
if err != nil {
fs.Debugf(nil, "Failed to send keep alive: %v", err)
}
}
// Send keepalives every interval over the ssh connection until done is closed
func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
done = make(chan struct{})
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for {
select {
case <-t.C:
c.sendKeepAlive()
case <-done:
return
}
}
}()
return done
}
// Closes the connection
func (c *conn) close() error {
sftpErr := c.sftpClient.Close()
@@ -1098,6 +1125,9 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
}
defer f.putSftpConnection(&c, err)
// Send keepalives while the connection is open
defer close(c.sendKeepAlives(keepAliveInterval))
session, err := c.sshClient.NewSession()
if err != nil {
return nil, fmt.Errorf("run: get SFTP session: %w", err)
@@ -1110,10 +1140,12 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
session.Stdout = &stdout
session.Stderr = &stderr
fs.Debugf(f, "Running remote command: %s", cmd)
err = session.Run(cmd)
if err != nil {
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, stderr.Bytes(), err)
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, bytes.TrimSpace(stderr.Bytes()), err)
}
fs.Debugf(f, "Remote command result: %s", bytes.TrimSpace(stdout.Bytes()))
return stdout.Bytes(), nil
}
@@ -1230,8 +1262,6 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
if o.fs.opt.DisableHashCheck {
return "", nil
}
@@ -1255,36 +1285,16 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return "", fmt.Errorf("Hash get SFTP connection: %w", err)
}
session, err := c.sshClient.NewSession()
o.fs.putSftpConnection(&c, err)
if err != nil {
return "", fmt.Errorf("Hash put SFTP connection: %w", err)
}
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(o.path())
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
err = session.Run(hashCmd + " " + escapedPath)
fs.Debugf(nil, "sftp cmd = %s", escapedPath)
b, err := o.fs.run(ctx, hashCmd+" "+escapedPath)
if err != nil {
_ = session.Close()
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
return "", nil
return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
}
_ = session.Close()
b := stdout.Bytes()
fs.Debugf(nil, "sftp output = %q", b)
str := parseHash(b)
fs.Debugf(nil, "sftp hash = %q", str)
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {

View File

@@ -42,7 +42,6 @@ func TestStandard(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -65,7 +64,6 @@ func TestRO(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -88,7 +86,6 @@ func TestNC(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -111,7 +108,6 @@ func TestPolicy1(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -134,7 +130,6 @@ func TestPolicy2(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -157,6 +152,5 @@ func TestPolicy3(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

View File

@@ -6,8 +6,6 @@ import (
"fmt"
"io"
"math"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
@@ -91,7 +89,7 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
return nil, err
}
f.RootFs = rFs
rootString := path.Join(remote, filepath.ToSlash(root))
rootString := fspath.JoinRootPath(remote, root)
myFs, err := cache.Get(ctx, rootString)
if err != nil && err != fs.ErrorIsFile {
return nil, err

View File

@@ -66,6 +66,11 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -79,8 +84,9 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
Enc encoder.MultiEncoder `config:"encoding"`
Token string `config:"token"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote yandex
@@ -630,7 +636,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
}
//delete directory
return f.delete(ctx, root, false)
return f.delete(ctx, root, f.opt.HardDelete)
}
// Rmdir deletes the container
@@ -1141,7 +1147,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.filePath(), false)
return o.fs.delete(ctx, o.filePath(), o.fs.opt.HardDelete)
}
// MimeType of an Object if known, "" otherwise

View File

@@ -550,3 +550,6 @@ put them back in again.` >}}
* Fredric Arklid <fredric.arklid@consid.se>
* Andy Jackson <Andrew.Jackson@bl.uk>
* Sinan Tan <i@tinytangent.com>
* deinferno <14363193+deinferno@users.noreply.github.com>
* rsapkf <rsapkfff@pm.me>
* Will Holtz <wholtz@gmail.com>

View File

@@ -81,6 +81,14 @@ key. It is stored using RFC3339 Format time with nanosecond
precision. The metadata is supplied during directory listings so
there is no overhead to using it.
### Performance
When uploading large files, increasing the value of
`--azureblob-upload-concurrency` will increase performance at the cost
of using more memory. The default of 16 is set quite conservatively to
use less memory. It may be necessary to raise it to 64 or higher to
fully utilize a 1 GBit/s link with a single file transfer.
### Restricted filename characters
In addition to the [default restricted characters set](/overview/#restricted-characters)

View File

@@ -107,8 +107,9 @@ At the end of the non interactive process, rclone will return a result
with `State` as empty string.
If `--all` is passed then rclone will ask all the config questions,
not just the post config questions. Any parameters are used as
defaults for questions as usual.
not just the post config questions. Parameters that are supplied on
the command line or from environment variables are used as defaults
for questions as usual.
Note that `bin/config.py` in the rclone source implements this protocol
as a readable demonstration.

View File

@@ -80,7 +80,7 @@ List all the files in your pCloud
rclone ls remote:
To copy a local directory to an pCloud directory called backup
To copy a local directory to a pCloud directory called backup
rclone copy /home/source remote:backup

View File

@@ -1062,7 +1062,9 @@ Required when using an S3 clone.
- "s3.eu-central-1.wasabisys.com"
- Wasabi EU Central endpoint
- "s3.ap-northeast-1.wasabisys.com"
- Wasabi AP Northeast endpoint
- Wasabi AP Northeast 1 (Tokyo) endpoint
- "s3.ap-northeast-2.wasabisys.com"
- Wasabi AP Northeast 2 (Osaka) endpoint
#### --s3-location-constraint

View File

@@ -620,7 +620,7 @@ issue](https://github.com/pkg/sftp/issues/156) is fixed.
Note that since SFTP isn't HTTP based the following flags don't work
with it: `--dump-headers`, `--dump-bodies`, `--dump-auth`
Note that `--timeout` isn't supported (but `--contimeout` is).
Note that `--timeout` and `--contimeout` are both supported.
## C14 {#c14}

View File

@@ -175,6 +175,15 @@ Leave blank to use the provider defaults.
- Type: string
- Default: ""
#### --yandex-hard-delete
Delete files permanently rather than putting them into the trash.
- Config: hard_delete
- Env Var: RCLONE_YANDEX_HARD_DELETE
- Type: bool
- Default: false
#### --yandex-encoding
This sets the encoding for the backend.

View File

@@ -24,9 +24,11 @@ import (
"golang.org/x/text/unicode/norm"
)
const inputBufferSize = 16 * 1024
// ReadLine reads some input
var ReadLine = func() string {
buf := bufio.NewReader(os.Stdin)
buf := bufio.NewReaderSize(os.Stdin, inputBufferSize)
line, err := buf.ReadString('\n')
if err != nil {
log.Fatalf("Failed to read line: %v", err)

View File

@@ -136,12 +136,14 @@ func NewClient(ctx context.Context) *http.Client {
// Transport is our http Transport which wraps an http.Transport
// * Sets the User Agent
// * Does logging
// * Updates metrics
type Transport struct {
*http.Transport
dump fs.DumpFlags
filterRequest func(req *http.Request)
userAgent string
headers []*fs.HTTPOption
metrics *Metrics
}
// newTransport wraps the http.Transport passed in and logs all
@@ -152,6 +154,7 @@ func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
dump: ci.Dump,
userAgent: ci.UserAgent,
headers: ci.Headers,
metrics: DefaultMetrics,
}
}
@@ -283,6 +286,9 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
fs.Debugf(nil, "%s", separatorResp)
logMutex.Unlock()
}
// Update metrics
t.metrics.onResponse(req, resp)
if err == nil {
checkServerTime(req, resp)
}

fs/fshttp/prometheus.go (new file, 51 lines)
View File

@@ -0,0 +1,51 @@
package fshttp
import (
"fmt"
"net/http"
"github.com/prometheus/client_golang/prometheus"
)
// Metrics provide Transport HTTP level metrics.
type Metrics struct {
StatusCode *prometheus.CounterVec
}
// NewMetrics creates a new metrics instance, the instance shall be assigned to
// DefaultMetrics before any processing takes place.
func NewMetrics(namespace string) *Metrics {
return &Metrics{
StatusCode: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: "http",
Name: "status_code",
}, []string{"host", "method", "code"}),
}
}
// DefaultMetrics specifies metrics used for new Transports.
var DefaultMetrics = (*Metrics)(nil)
// Collectors returns all prometheus metrics as collectors for registration.
func (m *Metrics) Collectors() []prometheus.Collector {
if m == nil {
return nil
}
return []prometheus.Collector{
m.StatusCode,
}
}
func (m *Metrics) onResponse(req *http.Request, resp *http.Response) {
if m == nil {
return
}
var statusCode = 0
if resp != nil {
statusCode = resp.StatusCode
}
m.StatusCode.WithLabelValues(req.Host, req.Method, fmt.Sprint(statusCode)).Inc()
}

View File

@@ -405,7 +405,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
if err == nil {
dst = newDst
in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server-side transfer
err = in.Close()
_ = in.Close()
} else {
_ = in.Close()
}
@@ -598,6 +598,8 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
}
}
// Move dst <- src
in := tr.Account(ctx, nil) // account the transfer
in.ServerSideCopyStart()
newDst, err = doMove(ctx, src, remote)
switch err {
case nil:
@@ -606,13 +608,16 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
} else {
fs.Infof(src, "Moved (server-side)")
}
in.ServerSideCopyEnd(newDst.Size()) // account the bytes for the server-side transfer
_ = in.Close()
return newDst, nil
case fs.ErrorCantMove:
fs.Debugf(src, "Can't move, switching to copy")
_ = in.Close()
default:
err = fs.CountError(err)
fs.Errorf(src, "Couldn't move: %v", err)
_ = in.Close()
return newDst, err
}
}

View File

@@ -18,23 +18,22 @@ import (
"sync"
"time"
"github.com/rclone/rclone/fs/rc/webgui"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/skratchdot/open-golang/open"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/rc/jobs"
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/webgui"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/random"
"github.com/skratchdot/open-golang/open"
)
var promHandler http.Handler
@@ -43,6 +42,13 @@ var onlyOnceWarningAllowOrigin sync.Once
func init() {
rcloneCollector := accounting.NewRcloneCollector(context.Background())
prometheus.MustRegister(rcloneCollector)
m := fshttp.NewMetrics("rclone")
for _, c := range m.Collectors() {
prometheus.MustRegister(c)
}
fshttp.DefaultMetrics = m
promHandler = promhttp.Handler()
}

View File

@@ -295,7 +295,6 @@ type Opt struct {
SkipFsCheckWrap bool // if set skip FsCheckWrap
SkipObjectCheckWrap bool // if set skip ObjectCheckWrap
SkipInvalidUTF8 bool // if set skip invalid UTF-8 checks
QuickTestOK bool // OK to run this test in "make quicktest"
}
// returns true if x is found in ss
@@ -360,7 +359,7 @@ func Run(t *testing.T, opt *Opt) {
unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever
)
if !opt.QuickTestOK && strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" {
if strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" {
t.Skip("quicktest only")
}
@@ -442,6 +441,10 @@ func Run(t *testing.T, opt *Opt) {
}
require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
// Get fsInfo which contains type, etc. of the fs
fsInfo, _, _, _, err := fs.ConfigFs(subRemoteName)
require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
// Skip the rest if it failed
skipIfNotOk(t)
@@ -1588,12 +1591,30 @@ func Run(t *testing.T, opt *Opt) {
t.Run("PublicLink", func(t *testing.T) {
skipIfNotOk(t)
doPublicLink := f.Features().PublicLink
if doPublicLink == nil {
publicLinkFunc := f.Features().PublicLink
if publicLinkFunc == nil {
t.Skip("FS has no PublicLinker interface")
}
type PublicLinkFunc func(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error)
wrapPublicLinkFunc := func(f PublicLinkFunc) PublicLinkFunc {
return func(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
link, err = publicLinkFunc(ctx, remote, expire, unlink)
if err == nil {
return
}
// For OneDrive Personal, link expiry is a premium feature
// Don't let it fail the test (https://github.com/rclone/rclone/issues/5420)
if fsInfo.Name == "onedrive" && strings.Contains(err.Error(), "accountUpgradeRequired") {
t.Log("treating accountUpgradeRequired as success for PublicLink")
link, err = "bogus link to "+remote, nil
}
return
}
}
expiry := fs.Duration(60 * time.Second)
doPublicLink := wrapPublicLinkFunc(publicLinkFunc)
// if object not found
link, err := doPublicLink(ctx, file1.Path+"_does_not_exist", expiry, false)
@@ -1640,7 +1661,7 @@ func Run(t *testing.T, opt *Opt) {
_, err = subRemote.Put(ctx, buf, obji)
require.NoError(t, err)
link4, err := subRemote.Features().PublicLink(ctx, "", expiry, false)
link4, err := wrapPublicLinkFunc(subRemote.Features().PublicLink)(ctx, "", expiry, false)
require.NoError(t, err, "Sharing root in a sub-remote should work")
require.NotEqual(t, "", link4, "Link should not be empty")
}