Mirror of https://github.com/rclone/rclone.git, synced 2026-01-28 07:13:39 +00:00

Compare commits: fix-5835-c...fix-tests (2 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a80287effd | |
| | 4216d55a05 | |
**backend/azureblob**

```diff
@@ -50,6 +50,8 @@ const (
 	timeFormatOut         = "2006-01-02T15:04:05.000000000Z07:00"
 	storageDefaultBaseURL = "blob.core.windows.net"
 	defaultChunkSize      = 4 * fs.Mebi
+	maxChunkSize          = 100 * fs.Mebi
+	uploadConcurrency     = 4
 	defaultAccessTier     = azblob.AccessTierNone
 	maxTryTimeout         = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
 	// Default storage account, key and blob endpoint for emulator support,
@@ -132,33 +134,12 @@ msi_client_id, or msi_mi_res_id parameters.`,
 		Advanced: true,
 	}, {
 		Name: "chunk_size",
-		Help: `Upload chunk size.
+		Help: `Upload chunk size (<= 100 MiB).
 
 Note that this is stored in memory and there may be up to
-"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
-in memory.`,
+"--transfers" chunks stored at once in memory.`,
 		Default:  defaultChunkSize,
 		Advanced: true,
 	}, {
-		Name: "upload_concurrency",
-		Help: `Concurrency for multipart uploads.
-
-This is the number of chunks of the same file that are uploaded
-concurrently.
-
-If you are uploading small numbers of large files over high-speed
-links and these uploads do not fully utilize your bandwidth, then
-increasing this may help to speed up the transfers.
-
-In tests, upload speed increases almost linearly with upload
-concurrency. For example to fill a gigabit pipe it may be necessary to
-raise this to 64. Note that this will use more memory.
-
-Note that chunks are stored in memory and there may be up to
-"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
-in memory.`,
-		Default:  16,
-		Advanced: true,
-	}, {
 		Name: "list_chunk",
 		Help: `Size of blob list.
@@ -276,7 +257,6 @@ type Options struct {
 	Endpoint          string               `config:"endpoint"`
 	SASURL            string               `config:"sas_url"`
 	ChunkSize         fs.SizeSuffix        `config:"chunk_size"`
-	UploadConcurrency int                  `config:"upload_concurrency"`
 	ListChunkSize     uint                 `config:"list_chunk"`
 	AccessTier        string               `config:"access_tier"`
 	ArchiveTierDelete bool                 `config:"archive_tier_delete"`
@@ -436,6 +416,9 @@ func checkUploadChunkSize(cs fs.SizeSuffix) error {
 	if cs < minChunkSize {
 		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
 	}
+	if cs > maxChunkSize {
+		return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
+	}
 	return nil
 }
@@ -1684,10 +1667,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
 		BufferSize:      int(o.fs.opt.ChunkSize),
-		MaxBuffers:      o.fs.opt.UploadConcurrency,
+		MaxBuffers:      uploadConcurrency,
 		Metadata:        o.meta,
 		BlobHTTPHeaders: httpHeaders,
-		TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
+		TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
 	}
 
 	// Don't retry, return a retry error instead
```
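The help text above encodes a simple memory bound: bytes held in RAM is chunk size × buffers per transfer × concurrent transfers. A standalone sketch of that arithmetic (the constants are illustrative, not read from rclone's config):

```go
package main

import "fmt"

const mebi = 1 << 20

func main() {
	const (
		chunkSize         = 4 * mebi // defaultChunkSize in the diff above
		uploadConcurrency = 4        // the constant this branch hard-codes
		transfers         = 4        // rclone's default --transfers
	)
	// Worst-case bytes buffered in memory for chunked uploads.
	peak := chunkSize * uploadConcurrency * transfers
	fmt.Printf("peak upload buffer memory: %d MiB\n", peak/mebi) // 64 MiB
}
```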
**backend/azureblob** (integration test)

```diff
@@ -17,10 +17,12 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName:    "TestAzureBlob:",
-		NilObject:     (*Object)(nil),
-		TiersToTest:   []string{"Hot", "Cool"},
-		ChunkedUpload: fstests.ChunkedUploadConfig{},
+		RemoteName:  "TestAzureBlob:",
+		NilObject:   (*Object)(nil),
+		TiersToTest: []string{"Hot", "Cool"},
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MaxChunkSize: maxChunkSize,
+		},
 	})
 }
```
**backend/chunker** (integration test)

```diff
@@ -44,6 +44,7 @@ func TestIntegration(t *testing.T) {
 			"UserInfo",
 			"Disconnect",
 		},
+		QuickTestOK: true,
 	}
 	if *fstest.RemoteName == "" {
 		name := "TestChunker"
```
**backend/compress** (integration test)

```diff
@@ -16,6 +16,9 @@ import (
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
+	if *fstest.RemoteName == "" {
+		t.Skip("Skipping as -remote not set")
+	}
 	opt := fstests.Opt{
 		RemoteName: *fstest.RemoteName,
 		NilObject:  (*Object)(nil),
@@ -61,5 +64,6 @@ func TestRemoteGzip(t *testing.T) {
 			{Name: name, Key: "remote", Value: tempdir},
 			{Name: name, Key: "compression_mode", Value: "gzip"},
 		},
+		QuickTestOK: true,
 	})
 }
```
**backend/crypt**

```diff
@@ -1049,6 +1049,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	}
 	// if this is wrapping a local object then we work out the hash
 	if srcObj.Fs().Features().IsLocal {
+		if o.f.opt.NoDataEncryption {
+			// If no encryption, just return the hash of the underlying object
+			return srcObj.Hash(ctx, hash)
+		}
 		// Read the data and encrypt it to calculate the hash
 		fs.Debugf(o, "Computing %v hash of encrypted source", hash)
 		return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
```
**backend/crypt** (internal test)

```diff
@@ -77,7 +77,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	enc, err := f.cipher.newEncrypter(inBuf, nil)
 	require.NoError(t, err)
 	nonce := enc.nonce // read the nonce at the start
-	_, err = io.Copy(&outBuf, enc)
+	if f.opt.NoDataEncryption {
+		_, err = outBuf.WriteString(contents)
+	} else {
+		_, err = io.Copy(&outBuf, enc)
+	}
 	require.NoError(t, err)
 
 	var oi fs.ObjectInfo = obj
@@ -96,7 +100,12 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	assert.NotEqual(t, path, src.Remote())
 
 	// Test ObjectInfo.Hash
-	wantHash := md5.Sum(outBuf.Bytes())
+	var wantHash [md5.Size]byte
+	if f.opt.NoDataEncryption {
+		wantHash = md5.Sum([]byte(contents))
+	} else {
+		wantHash = md5.Sum(outBuf.Bytes())
+	}
 	gotHash, err := src.Hash(ctx, hash.MD5)
 	require.NoError(t, err)
 	assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)
```
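The test change hinges on one fact: with no data encryption the stored bytes are the plaintext, so the expected checksum is the MD5 of the raw contents rather than of the encrypted buffer. A minimal illustration of the `[md5.Size]byte` / `%x` idiom used above (the contents string is invented):

```go
package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	contents := "potato" // hypothetical test contents
	var wantHash [md5.Size]byte = md5.Sum([]byte(contents))
	// The test compares this hex form against Object.Hash output.
	fmt.Printf("%x\n", wantHash)
}
```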
**backend/crypt** (integration tests)

```diff
@@ -46,6 +46,7 @@ func TestStandardBase32(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -67,6 +68,7 @@ func TestStandardBase64(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -88,6 +90,7 @@ func TestStandardBase32768(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -109,6 +112,7 @@ func TestOff(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -131,6 +135,7 @@ func TestObfuscate(t *testing.T) {
 		SkipBadWindowsCharacters:     true,
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -154,5 +159,6 @@ func TestNoDataObfuscate(t *testing.T) {
 		SkipBadWindowsCharacters:     true,
 		UnimplementableFsMethods:     []string{"OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
```
**backend/hasher** (integration test)

```diff
@@ -25,6 +25,7 @@ func TestIntegration(t *testing.T) {
 			"OpenWriterAt",
 		},
 		UnimplementableObjectMethods: []string{},
+		QuickTestOK:                  true,
 	}
 	if *fstest.RemoteName == "" {
 		tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")
```
**backend/local** (integration test)

```diff
@@ -11,7 +11,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "",
-		NilObject:  (*local.Object)(nil),
+		RemoteName:  "",
+		NilObject:   (*local.Object)(nil),
+		QuickTestOK: true,
 	})
 }
```
**backend/memory** (integration test)

```diff
@@ -10,7 +10,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: ":memory:",
-		NilObject:  (*Object)(nil),
+		RemoteName:  ":memory:",
+		NilObject:   (*Object)(nil),
+		QuickTestOK: true,
 	})
 }
```
**backend/s3**

```diff
@@ -761,11 +761,7 @@ func init() {
 			Provider: "Wasabi",
 		}, {
 			Value:    "s3.ap-northeast-1.wasabisys.com",
-			Help:     "Wasabi AP Northeast 1 (Tokyo) endpoint",
-			Provider: "Wasabi",
-		}, {
-			Value:    "s3.ap-northeast-2.wasabisys.com",
-			Help:     "Wasabi AP Northeast 2 (Osaka) endpoint",
+			Help:     "Wasabi AP Northeast endpoint",
 			Provider: "Wasabi",
 		}},
 	}, {
```
**backend/sftp**

```diff
@@ -42,8 +42,7 @@ const (
 	hashCommandNotSupported = "none"
 	minSleep                = 100 * time.Millisecond
 	maxSleep                = 2 * time.Second
-	decayConstant           = 2           // bigger for slower decay, exponential
-	keepAliveInterval       = time.Minute // send keepalives every this long while running commands
+	decayConstant           = 2 // bigger for slower decay, exponential
 )
 
 var (
@@ -340,32 +339,6 @@ func (c *conn) wait() {
 	c.err <- c.sshClient.Conn.Wait()
 }
 
-// Send a keepalive over the ssh connection
-func (c *conn) sendKeepAlive() {
-	_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
-	if err != nil {
-		fs.Debugf(nil, "Failed to send keep alive: %v", err)
-	}
-}
-
-// Send keepalives every interval over the ssh connection until done is closed
-func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
-	done = make(chan struct{})
-	go func() {
-		t := time.NewTicker(interval)
-		defer t.Stop()
-		for {
-			select {
-			case <-t.C:
-				c.sendKeepAlive()
-			case <-done:
-				return
-			}
-		}
-	}()
-	return done
-}
-
 // Closes the connection
 func (c *conn) close() error {
 	sftpErr := c.sftpClient.Close()
```
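The removed `sendKeepAlives` is a standard Go lifetime pattern: a ticker goroutine runs until its `done` channel is closed, so `defer close(...)` at the call site bounds the goroutine to the enclosing function. A generic standalone sketch of the same pattern (names invented here):

```go
package main

import (
	"fmt"
	"time"
)

// every runs fn at each tick until the returned channel is closed.
func every(interval time.Duration, fn func()) (done chan struct{}) {
	done = make(chan struct{})
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				fn()
			case <-done:
				return
			}
		}
	}()
	return done
}

func main() {
	done := every(10*time.Millisecond, func() { fmt.Println("keepalive") })
	defer close(done) // same shape as `defer close(c.sendKeepAlives(interval))`
	time.Sleep(35 * time.Millisecond)
}
```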
**backend/sftp** (continued)

```diff
@@ -1125,9 +1098,6 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
 	}
 	defer f.putSftpConnection(&c, err)
 
-	// Send keepalives while the connection is open
-	defer close(c.sendKeepAlives(keepAliveInterval))
-
 	session, err := c.sshClient.NewSession()
 	if err != nil {
 		return nil, fmt.Errorf("run: get SFTP session: %w", err)
@@ -1140,12 +1110,10 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
 	session.Stdout = &stdout
 	session.Stderr = &stderr
 
-	fs.Debugf(f, "Running remote command: %s", cmd)
 	err = session.Run(cmd)
 	if err != nil {
-		return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, bytes.TrimSpace(stderr.Bytes()), err)
+		return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, stderr.Bytes(), err)
 	}
-	fs.Debugf(f, "Remote command result: %s", bytes.TrimSpace(stdout.Bytes()))
 
 	return stdout.Bytes(), nil
 }
@@ -1262,6 +1230,8 @@ func (o *Object) Remote() string {
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
 func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
+	o.fs.addSession() // Show session in use
+	defer o.fs.removeSession()
 	if o.fs.opt.DisableHashCheck {
 		return "", nil
 	}
@@ -1285,16 +1255,36 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 		return "", hash.ErrUnsupported
 	}
 
+	c, err := o.fs.getSftpConnection(ctx)
+	if err != nil {
+		return "", fmt.Errorf("Hash get SFTP connection: %w", err)
+	}
+	session, err := c.sshClient.NewSession()
+	o.fs.putSftpConnection(&c, err)
+	if err != nil {
+		return "", fmt.Errorf("Hash put SFTP connection: %w", err)
+	}
+
+	var stdout, stderr bytes.Buffer
+	session.Stdout = &stdout
+	session.Stderr = &stderr
 	escapedPath := shellEscape(o.path())
 	if o.fs.opt.PathOverride != "" {
 		escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
 	}
-	b, err := o.fs.run(ctx, hashCmd+" "+escapedPath)
+	err = session.Run(hashCmd + " " + escapedPath)
+	fs.Debugf(nil, "sftp cmd = %s", escapedPath)
 	if err != nil {
-		return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
+		_ = session.Close()
+		fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
+		return "", nil
 	}
 
+	_ = session.Close()
+	b := stdout.Bytes()
+	fs.Debugf(nil, "sftp output = %q", b)
 	str := parseHash(b)
+	fs.Debugf(nil, "sftp hash = %q", str)
 	if r == hash.MD5 {
 		o.md5sum = &str
 	} else if r == hash.SHA1 {
```
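`parseHash` itself is not shown in this diff; for `md5sum`/`sha1sum`-style output it only needs the first whitespace-separated field. A hypothetical stand-in:

```go
package main

import (
	"fmt"
	"strings"
)

// firstField mimics what a parser of md5sum output must do: the checksum
// is the first whitespace-separated token, the filename follows.
// (Illustrative stand-in for the backend's parseHash.)
func firstField(out []byte) string {
	fields := strings.Fields(string(out))
	if len(fields) == 0 {
		return ""
	}
	return fields[0]
}

func main() {
	out := []byte("d41d8cd98f00b204e9800998ecf8427e  /tmp/empty\n")
	fmt.Println(firstField(out)) // d41d8cd98f00b204e9800998ecf8427e
}
```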
**backend/union** (integration tests)

```diff
@@ -42,6 +42,7 @@ func TestStandard(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -64,6 +65,7 @@ func TestRO(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -86,6 +88,7 @@ func TestNC(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -108,6 +111,7 @@ func TestPolicy1(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -130,6 +134,7 @@ func TestPolicy2(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
@@ -152,5 +157,6 @@ func TestPolicy3(t *testing.T) {
 		},
 		UnimplementableFsMethods:     []string{"OpenWriterAt", "DuplicateFiles"},
 		UnimplementableObjectMethods: []string{"MimeType"},
+		QuickTestOK:                  true,
 	})
 }
```
**backend/union** (upstream)

```diff
@@ -6,6 +6,8 @@ import (
 	"fmt"
 	"io"
 	"math"
+	"path"
+	"path/filepath"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -89,7 +91,7 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
 		return nil, err
 	}
 	f.RootFs = rFs
-	rootString := fspath.JoinRootPath(remote, root)
+	rootString := path.Join(remote, filepath.ToSlash(root))
 	myFs, err := cache.Get(ctx, rootString)
 	if err != nil && err != fs.ErrorIsFile {
 		return nil, err
```
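The replacement line composes two stdlib calls: `filepath.ToSlash` normalizes an OS-specific root to forward slashes, and `path.Join` joins and cleans the result. A small demonstration (the remote name is invented):

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// On Windows ToSlash turns `dir\sub` into "dir/sub"; on Unix a
	// backslash is not a separator and passes through unchanged.
	root := filepath.ToSlash("dir/sub")
	fmt.Println(path.Join("myremote:bucket", root)) // myremote:bucket/dir/sub
}
```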
**backend/yandex**

```diff
@@ -66,11 +66,6 @@ func init() {
 			})
 		},
 		Options: append(oauthutil.SharedOptions, []fs.Option{{
-			Name:     "hard_delete",
-			Help:     "Delete files permanently rather than putting them into the trash.",
-			Default:  false,
-			Advanced: true,
-		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
 			Advanced: true,
@@ -84,9 +79,8 @@ func init() {
 
 // Options defines the configuration for this backend
 type Options struct {
-	Token      string               `config:"token"`
-	HardDelete bool                 `config:"hard_delete"`
-	Enc        encoder.MultiEncoder `config:"encoding"`
+	Token string               `config:"token"`
+	Enc   encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote yandex
@@ -636,7 +630,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 		}
 	}
 	//delete directory
-	return f.delete(ctx, root, f.opt.HardDelete)
+	return f.delete(ctx, root, false)
 }
 
 // Rmdir deletes the container
@@ -1147,7 +1141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // Remove an object
 func (o *Object) Remove(ctx context.Context) error {
-	return o.fs.delete(ctx, o.filePath(), o.fs.opt.HardDelete)
+	return o.fs.delete(ctx, o.filePath(), false)
 }
 
 // MimeType of an Object if known, "" otherwise
```
**docs: authors**

```diff
@@ -550,6 +550,3 @@ put them back in again.` >}}
   * Fredric Arklid <fredric.arklid@consid.se>
   * Andy Jackson <Andrew.Jackson@bl.uk>
   * Sinan Tan <i@tinytangent.com>
-  * deinferno <14363193+deinferno@users.noreply.github.com>
-  * rsapkf <rsapkfff@pm.me>
-  * Will Holtz <wholtz@gmail.com>
```
**docs: azureblob**

```diff
@@ -81,14 +81,6 @@ key. It is stored using RFC3339 Format time with nanosecond
 precision. The metadata is supplied during directory listings so
 there is no overhead to using it.
 
-### Performance
-
-When uploading large files, increasing the value of
-`--azureblob-upload-concurrency` will increase performance at the cost
-of using more memory. The default of 16 is set quite conservatively to
-use less memory. It maybe be necessary raise it to 64 or higher to
-fully utilize a 1 GBit/s link with a single file transfer.
-
 ### Restricted filename characters
 
 In addition to the [default restricted characters set](/overview/#restricted-characters)
```
**docs: non-interactive config**

```diff
@@ -107,9 +107,8 @@ At the end of the non interactive process, rclone will return a result
 with `State` as empty string.
 
 If `--all` is passed then rclone will ask all the config questions,
-not just the post config questions. Parameters that are supplied on
-the command line or from environment variables are used as defaults
-for questions as usual.
+not just the post config questions. Any parameters are used as
+defaults for questions as usual.
 
 Note that `bin/config.py` in the rclone source implements this protocol
 as a readable demonstration.
```
**docs: pcloud**

```diff
@@ -80,7 +80,7 @@ List all the files in your pCloud
 
     rclone ls remote:
 
-To copy a local directory to a pCloud directory called backup
+To copy a local directory to an pCloud directory called backup
 
     rclone copy /home/source remote:backup
 
```
**docs: s3**

```diff
@@ -1062,9 +1062,7 @@ Required when using an S3 clone.
     - "s3.eu-central-1.wasabisys.com"
         - Wasabi EU Central endpoint
     - "s3.ap-northeast-1.wasabisys.com"
-        - Wasabi AP Northeast 1 (Tokyo) endpoint
-    - "s3.ap-northeast-2.wasabisys.com"
-        - Wasabi AP Northeast 2 (Osaka) endpoint
+        - Wasabi AP Northeast endpoint
 
 #### --s3-location-constraint
```
**docs: sftp**

```diff
@@ -620,7 +620,7 @@ issue](https://github.com/pkg/sftp/issues/156) is fixed.
 Note that since SFTP isn't HTTP based the following flags don't work
 with it: `--dump-headers`, `--dump-bodies`, `--dump-auth`
 
-Note that `--timeout` and `--contimeout` are both supported.
+Note that `--timeout` isn't supported (but `--contimeout` is).
 
 ## C14 {#c14}
 
```
**docs: yandex**

```diff
@@ -175,15 +175,6 @@ Leave blank to use the provider defaults.
 - Type: string
 - Default: ""
 
-#### --yandex-hard-delete
-
-Delete files permanently rather than putting them into the trash.
-
-- Config:      hard_delete
-- Env Var:     RCLONE_YANDEX_HARD_DELETE
-- Type:        bool
-- Default:     false
-
 #### --yandex-encoding
 
 This sets the encoding for the backend.
```
**fs/config**

```diff
@@ -24,11 +24,9 @@ import (
 	"golang.org/x/text/unicode/norm"
 )
 
-const inputBufferSize = 16 * 1024
-
 // ReadLine reads some input
 var ReadLine = func() string {
-	buf := bufio.NewReaderSize(os.Stdin, inputBufferSize)
+	buf := bufio.NewReader(os.Stdin)
 	line, err := buf.ReadString('\n')
 	if err != nil {
 		log.Fatalf("Failed to read line: %v", err)
```
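The dropped constant matters less than it looks: `bufio.NewReader` is `bufio.NewReaderSize` with the package's default buffer (4096 bytes), which is ample for reading a line from stdin. For example:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	r := bufio.NewReader(strings.NewReader("hello\n")) // default 4096-byte buffer
	line, err := r.ReadString('\n')
	fmt.Printf("%q %v\n", line, err) // "hello\n" <nil>
}
```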
**fs/fshttp**

```diff
@@ -136,14 +136,12 @@ func NewClient(ctx context.Context) *http.Client {
 // Transport is our http Transport which wraps an http.Transport
 // * Sets the User Agent
 // * Does logging
-// * Updates metrics
 type Transport struct {
 	*http.Transport
 	dump          fs.DumpFlags
 	filterRequest func(req *http.Request)
 	userAgent     string
 	headers       []*fs.HTTPOption
-	metrics       *Metrics
 }
 
 // newTransport wraps the http.Transport passed in and logs all
@@ -154,7 +152,6 @@ func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
 		dump:      ci.Dump,
 		userAgent: ci.UserAgent,
 		headers:   ci.Headers,
-		metrics:   DefaultMetrics,
 	}
 }
 
@@ -286,9 +283,6 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
 		fs.Debugf(nil, "%s", separatorResp)
 		logMutex.Unlock()
 	}
-	// Update metrics
-	t.metrics.onResponse(req, resp)
-
 	if err == nil {
 		checkServerTime(req, resp)
 	}
```
**fs/fshttp: metrics file (deleted in full)**

```diff
@@ -1,51 +0,0 @@
-package fshttp
-
-import (
-	"fmt"
-	"net/http"
-
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-// Metrics provide Transport HTTP level metrics.
-type Metrics struct {
-	StatusCode *prometheus.CounterVec
-}
-
-// NewMetrics creates a new metrics instance, the instance shall be assigned to
-// DefaultMetrics before any processing takes place.
-func NewMetrics(namespace string) *Metrics {
-	return &Metrics{
-		StatusCode: prometheus.NewCounterVec(prometheus.CounterOpts{
-			Namespace: namespace,
-			Subsystem: "http",
-			Name:      "status_code",
-		}, []string{"host", "method", "code"}),
-	}
-}
-
-// DefaultMetrics specifies metrics used for new Transports.
-var DefaultMetrics = (*Metrics)(nil)
-
-// Collectors returns all prometheus metrics as collectors for registration.
-func (m *Metrics) Collectors() []prometheus.Collector {
-	if m == nil {
-		return nil
-	}
-	return []prometheus.Collector{
-		m.StatusCode,
-	}
-}
-
-func (m *Metrics) onResponse(req *http.Request, resp *http.Response) {
-	if m == nil {
-		return
-	}
-
-	var statusCode = 0
-	if resp != nil {
-		statusCode = resp.StatusCode
-	}
-
-	m.StatusCode.WithLabelValues(req.Host, req.Method, fmt.Sprint(statusCode)).Inc()
-}
```
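The deleted file follows the stock `client_golang` pattern: a nil-safe wrapper around a `CounterVec` labelled by host, method, and status code. A minimal standalone example of the same pattern, independent of rclone (the namespace and label values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Same label scheme as the deleted Metrics.StatusCode counter.
	statusCode := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "myapp", // illustrative namespace
		Subsystem: "http",
		Name:      "status_code",
	}, []string{"host", "method", "code"})
	prometheus.MustRegister(statusCode)

	// One increment per completed request.
	statusCode.WithLabelValues("example.com", "GET", "200").Inc()
	fmt.Println("counter registered and incremented")
}
```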
**fs/operations**

```diff
@@ -405,7 +405,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
 			if err == nil {
 				dst = newDst
 				in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server-side transfer
-				_ = in.Close()
+				err = in.Close()
 			} else {
 				_ = in.Close()
 			}
@@ -598,8 +598,6 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
 		}
 	}
 	// Move dst <- src
-	in := tr.Account(ctx, nil) // account the transfer
-	in.ServerSideCopyStart()
 	newDst, err = doMove(ctx, src, remote)
 	switch err {
 	case nil:
@@ -608,16 +606,13 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
 		} else {
 			fs.Infof(src, "Moved (server-side)")
 		}
-		in.ServerSideCopyEnd(newDst.Size()) // account the bytes for the server-side transfer
-		_ = in.Close()
 
 		return newDst, nil
 	case fs.ErrorCantMove:
 		fs.Debugf(src, "Can't move, switching to copy")
-		_ = in.Close()
 	default:
 		err = fs.CountError(err)
 		fs.Errorf(src, "Couldn't move: %v", err)
-		_ = in.Close()
 		return newDst, err
 	}
 }
```
**fs/rc/rcserver**

```diff
@@ -18,22 +18,23 @@ import (
 	"sync"
 	"time"
 
-	"github.com/rclone/rclone/fs/rc/webgui"
-
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
-	"github.com/skratchdot/open-golang/open"
 
 	"github.com/rclone/rclone/cmd/serve/httplib"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/config"
-	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/fs/rc/jobs"
 	"github.com/rclone/rclone/fs/rc/rcflags"
+	"github.com/rclone/rclone/fs/rc/webgui"
 	"github.com/rclone/rclone/lib/http/serve"
 	"github.com/rclone/rclone/lib/random"
+	"github.com/skratchdot/open-golang/open"
 )
 
 var promHandler http.Handler
@@ -42,13 +43,6 @@ var onlyOnceWarningAllowOrigin sync.Once
 func init() {
 	rcloneCollector := accounting.NewRcloneCollector(context.Background())
 	prometheus.MustRegister(rcloneCollector)
-
-	m := fshttp.NewMetrics("rclone")
-	for _, c := range m.Collectors() {
-		prometheus.MustRegister(c)
-	}
-	fshttp.DefaultMetrics = m
-
 	promHandler = promhttp.Handler()
 }
```
**fstest/fstests**

```diff
@@ -295,6 +295,7 @@ type Opt struct {
 	SkipFsCheckWrap          bool // if set skip FsCheckWrap
 	SkipObjectCheckWrap      bool // if set skip ObjectCheckWrap
 	SkipInvalidUTF8          bool // if set skip invalid UTF-8 checks
+	QuickTestOK              bool // OK to run this test in "make quicktest"
 }
 
 // returns true if x is found in ss
@@ -359,7 +360,7 @@ func Run(t *testing.T, opt *Opt) {
 		unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever
 	)
 
-	if strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" {
+	if !opt.QuickTestOK && strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" {
 		t.Skip("quicktest only")
 	}
 
@@ -441,10 +442,6 @@ func Run(t *testing.T, opt *Opt) {
 	}
 	require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
 
-	// Get fsInfo which contains type, etc. of the fs
-	fsInfo, _, _, _, err := fs.ConfigFs(subRemoteName)
-	require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
-
 	// Skip the rest if it failed
 	skipIfNotOk(t)
 
```
**fstest/fstests** (PublicLink)

```diff
@@ -1591,30 +1588,12 @@ func Run(t *testing.T, opt *Opt) {
 	t.Run("PublicLink", func(t *testing.T) {
 		skipIfNotOk(t)
 
-		publicLinkFunc := f.Features().PublicLink
-		if publicLinkFunc == nil {
+		doPublicLink := f.Features().PublicLink
+		if doPublicLink == nil {
 			t.Skip("FS has no PublicLinker interface")
 		}
 
-		type PublicLinkFunc func(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error)
-		wrapPublicLinkFunc := func(f PublicLinkFunc) PublicLinkFunc {
-			return func(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
-				link, err = publicLinkFunc(ctx, remote, expire, unlink)
-				if err == nil {
-					return
-				}
-				// For OneDrive Personal, link expiry is a premium feature
-				// Don't let it fail the test (https://github.com/rclone/rclone/issues/5420)
-				if fsInfo.Name == "onedrive" && strings.Contains(err.Error(), "accountUpgradeRequired") {
-					t.Log("treating accountUpgradeRequired as success for PublicLink")
-					link, err = "bogus link to "+remote, nil
-				}
-				return
-			}
-		}
-
 		expiry := fs.Duration(60 * time.Second)
-		doPublicLink := wrapPublicLinkFunc(publicLinkFunc)
 
 		// if object not found
 		link, err := doPublicLink(ctx, file1.Path+"_does_not_exist", expiry, false)
```
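The removed wrapper is a reusable shape: decorate a function so one known, benign failure is reported as success while every other error passes through. A generic standalone sketch with invented names:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

type linkFunc func(remote string) (string, error)

// tolerate converts an error containing marker into a fake success,
// mirroring the accountUpgradeRequired special case removed above.
func tolerate(fn linkFunc, marker string) linkFunc {
	return func(remote string) (string, error) {
		link, err := fn(remote)
		if err != nil && strings.Contains(err.Error(), marker) {
			return "bogus link to " + remote, nil
		}
		return link, err
	}
}

func main() {
	premiumOnly := func(remote string) (string, error) {
		return "", errors.New("accountUpgradeRequired")
	}
	link, err := tolerate(premiumOnly, "accountUpgradeRequired")("file.txt")
	fmt.Println(link, err) // bogus link to file.txt <nil>
}
```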
**fstest/fstests** (continued)

```diff
@@ -1661,7 +1640,7 @@ func Run(t *testing.T, opt *Opt) {
 			_, err = subRemote.Put(ctx, buf, obji)
 			require.NoError(t, err)
 
-			link4, err := wrapPublicLinkFunc(subRemote.Features().PublicLink)(ctx, "", expiry, false)
+			link4, err := subRemote.Features().PublicLink(ctx, "", expiry, false)
 			require.NoError(t, err, "Sharing root in a sub-remote should work")
 			require.NotEqual(t, "", link4, "Link should not be empty")
 		}
```