1
0
mirror of https://github.com/rclone/rclone.git synced 2026-01-27 06:43:27 +00:00

Compare commits

..

1 Commits

135 changed files with 186 additions and 397 deletions

View File

@@ -19,7 +19,7 @@ import (
"path"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
@@ -50,6 +50,8 @@ const (
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
defaultChunkSize = 4 * fs.Mebi
maxChunkSize = 100 * fs.Mebi
uploadConcurrency = 4
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
@@ -132,33 +134,12 @@ msi_client_id, or msi_mi_res_id parameters.`,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size.
Help: `Upload chunk size (<= 100 MiB).
Note that this is stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
"--transfers" chunks stored at once in memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.
In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: 16,
Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of blob list.
@@ -276,7 +257,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
ArchiveTierDelete bool `config:"archive_tier_delete"`
@@ -436,6 +416,9 @@ func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
@@ -1684,10 +1667,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: o.fs.opt.UploadConcurrency,
MaxBuffers: uploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
}
// Don't retry, return a retry error instead

View File

@@ -17,10 +17,12 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
})
}

View File

@@ -17,7 +17,7 @@ import (
"path"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/b2/api"

View File

@@ -13,7 +13,7 @@ import (
gohash "hash"
"io"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"

View File

@@ -23,7 +23,7 @@ import (
"path"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"time"

View File

@@ -13,7 +13,7 @@ import (
"io"
"net/http"
"strconv"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/box/api"

View File

@@ -16,7 +16,7 @@ import (
"sort"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"syscall"
"time"

View File

@@ -11,7 +11,7 @@ import (
"path"
"runtime"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -8,7 +8,7 @@ import (
"fmt"
"io"
"path"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -12,7 +12,7 @@ import (
"net/http"
"net/url"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
cache "github.com/patrickmn/go-cache"

View File

@@ -14,7 +14,7 @@ import (
"path"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -19,7 +19,7 @@ import (
"sort"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -13,7 +13,7 @@ import (
"io"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"unicode/utf8"

View File

@@ -21,7 +21,7 @@ import (
"sort"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"text/template"
"time"
@@ -1693,6 +1693,11 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
var paths []string
var grouping int32
usingQueryFilter := false
if fi, use := filter.GetConfig(ctx), filter.GetUseFilter(ctx); fi != nil && use {
usingQueryFilter = true
}
for dir := range in {
dirs = append(dirs[:0], dir.id)
paths = append(paths[:0], dir.path)
@@ -1765,7 +1770,8 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397
if len(dirs) > 1 && !foundItems {
// However, empty result is legitimate if query filter was applied.
if len(dirs) > 1 && !foundItems && !usingQueryFilter {
if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
}
@@ -1783,7 +1789,8 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
}
// If using a grouping of 1 and dir was empty then check to see if it
// is part of the group that caused grouping to be disabled.
if grouping == 1 && len(dirs) == 1 && !foundItems {
// However, empty result is legitimate if query filter was applied.
if grouping == 1 && len(dirs) == 1 && !foundItems && !usingQueryFilter {
f.listRmu.Lock()
if _, found := f.listRempties[dirs[0]]; found {
// Remove the ID

View File

@@ -10,7 +10,7 @@ import (
"context"
"errors"
"fmt"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/async"

View File

@@ -25,7 +25,7 @@ import (
"net/url"
"path"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"time"

View File

@@ -12,7 +12,7 @@ import (
"path"
"runtime"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/jlaffaye/ftp"

View File

@@ -5,7 +5,7 @@ package googlephotos
import (
"path"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/backend/googlephotos/api"
)

View File

@@ -15,7 +15,7 @@ import (
"regexp"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/googlephotos/api"

View File

@@ -9,7 +9,7 @@ import (
"io"
"path"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -15,7 +15,7 @@ import (
"path"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -13,7 +13,7 @@ import (
"path/filepath"
"runtime"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"unicode/utf8"

View File

@@ -3,7 +3,7 @@ package local
import (
"io/ioutil"
"os"
"github.com/rclone/rclone/lib/sync"
"sync"
"testing"
"time"

View File

@@ -12,7 +12,7 @@ import (
"sort"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"encoding/hex"

View File

@@ -22,7 +22,7 @@ import (
"io"
"path"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -11,7 +11,7 @@ import (
"io/ioutil"
"path"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -16,7 +16,7 @@ import (
"regexp"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/onedrive/api"

View File

@@ -13,7 +13,7 @@ import (
"hash"
"io"
"sort"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"

View File

@@ -19,7 +19,7 @@ import (
"sort"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"

View File

@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"net/url"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -10,7 +10,7 @@ import (
"path"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/coreos/go-semver/semver"

View File

@@ -17,7 +17,7 @@ import (
"regexp"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"time"
@@ -42,8 +42,7 @@ const (
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
keepAliveInterval = time.Minute // send keepalives every this long while running commands
decayConstant = 2 // bigger for slower decay, exponential
)
var (
@@ -340,32 +339,6 @@ func (c *conn) wait() {
c.err <- c.sshClient.Conn.Wait()
}
// Send a keepalive over the ssh connection
func (c *conn) sendKeepAlive() {
_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
if err != nil {
fs.Debugf(nil, "Failed to send keep alive: %v", err)
}
}
// Send keepalives every interval over the ssh connection until done is closed
func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
done = make(chan struct{})
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for {
select {
case <-t.C:
c.sendKeepAlive()
case <-done:
return
}
}
}()
return done
}
// Closes the connection
func (c *conn) close() error {
sftpErr := c.sftpClient.Close()
@@ -1125,9 +1098,6 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
}
defer f.putSftpConnection(&c, err)
// Send keepalives while the connection is open
defer close(c.sendKeepAlives(keepAliveInterval))
session, err := c.sshClient.NewSession()
if err != nil {
return nil, fmt.Errorf("run: get SFTP session: %w", err)
@@ -1140,12 +1110,10 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
session.Stdout = &stdout
session.Stderr = &stderr
fs.Debugf(f, "Running remote command: %s", cmd)
err = session.Run(cmd)
if err != nil {
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, bytes.TrimSpace(stderr.Bytes()), err)
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, stderr.Bytes(), err)
}
fs.Debugf(f, "Remote command result: %s", bytes.TrimSpace(stdout.Bytes()))
return stdout.Bytes(), nil
}
@@ -1262,6 +1230,8 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
if o.fs.opt.DisableHashCheck {
return "", nil
}
@@ -1285,16 +1255,36 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return "", fmt.Errorf("Hash get SFTP connection: %w", err)
}
session, err := c.sshClient.NewSession()
o.fs.putSftpConnection(&c, err)
if err != nil {
return "", fmt.Errorf("Hash put SFTP connection: %w", err)
}
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(o.path())
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
b, err := o.fs.run(ctx, hashCmd+" "+escapedPath)
err = session.Run(hashCmd + " " + escapedPath)
fs.Debugf(nil, "sftp cmd = %s", escapedPath)
if err != nil {
return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
_ = session.Close()
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
return "", nil
}
_ = session.Close()
b := stdout.Bytes()
fs.Debugf(nil, "sftp output = %q", b)
str := parseHash(b)
fs.Debugf(nil, "sftp hash = %q", str)
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {

View File

@@ -3,7 +3,7 @@
package sftp
import "github.com/rclone/rclone/lib/sync"
import "sync"
// stringLock locks for string IDs passed in
type stringLock struct {

View File

@@ -5,7 +5,7 @@ package sftp
import (
"fmt"
"github.com/rclone/rclone/lib/sync"
"sync"
"testing"
"time"

View File

@@ -13,7 +13,7 @@ import (
"fmt"
"io"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/backend/sharefile/api"
"github.com/rclone/rclone/fs"

View File

@@ -23,7 +23,7 @@ import (
"regexp"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/sugarsync/api"

View File

@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"io"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/union/upstream"

View File

@@ -3,7 +3,7 @@ package policy
import (
"context"
"path"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/backend/union/upstream"
"github.com/rclone/rclone/fs"

View File

@@ -3,7 +3,7 @@ package policy
import (
"context"
"path"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/union/upstream"

View File

@@ -9,7 +9,7 @@ import (
"path"
"path/filepath"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/union/policy"

View File

@@ -9,7 +9,7 @@ import (
"path"
"path/filepath"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"time"

View File

@@ -6,7 +6,7 @@ import (
"regexp"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -21,7 +21,7 @@ import (
"path"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/backend/webdav/api"

View File

@@ -66,11 +66,6 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -84,9 +79,8 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
Token string `config:"token"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote yandex
@@ -636,7 +630,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
}
//delete directory
return f.delete(ctx, root, f.opt.HardDelete)
return f.delete(ctx, root, false)
}
// Rmdir deletes the container
@@ -1147,7 +1141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.filePath(), o.fs.opt.HardDelete)
return o.fs.delete(ctx, o.filePath(), false)
}
// MimeType of an Object if known, "" otherwise

View File

@@ -19,7 +19,7 @@ import (
"runtime"
"sort"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"text/template"
"time"

View File

@@ -19,7 +19,7 @@ import (
"runtime/pprof"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -9,7 +9,7 @@ import (
"io"
"os"
"path"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"time"

View File

@@ -12,7 +12,7 @@ import (
"os"
"path/filepath"
"sort"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
)

View File

@@ -7,7 +7,7 @@ import (
"os"
"runtime"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/cmd"

View File

@@ -5,7 +5,7 @@ import (
"errors"
"log"
"sort"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -5,7 +5,7 @@ import (
"context"
"fmt"
"path"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"

View File

@@ -6,7 +6,7 @@ import (
"bytes"
"fmt"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -3,7 +3,7 @@ package rcd
import (
"context"
"log"
"github.com/rclone/rclone/lib/sync"
"sync"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
"github.com/rclone/rclone/cmd"

View File

@@ -10,7 +10,7 @@ import (
"path/filepath"
"reflect"
"sort"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"

View File

@@ -14,7 +14,7 @@ import (
"os"
"os/user"
"strconv"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/cmd"

View File

@@ -5,7 +5,7 @@ package restic
import (
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
)

View File

@@ -16,7 +16,7 @@ import (
"sort"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/cmd"

View File

@@ -3,7 +3,7 @@ package memory
import (
"context"
"runtime"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/test"

View File

@@ -550,6 +550,3 @@ put them back in again.` >}}
* Fredric Arklid <fredric.arklid@consid.se>
* Andy Jackson <Andrew.Jackson@bl.uk>
* Sinan Tan <i@tinytangent.com>
* deinferno <14363193+deinferno@users.noreply.github.com>
* rsapkf <rsapkfff@pm.me>
* Will Holtz <wholtz@gmail.com>

View File

@@ -81,14 +81,6 @@ key. It is stored using RFC3339 Format time with nanosecond
precision. The metadata is supplied during directory listings so
there is no overhead to using it.
### Performance
When uploading large files, increasing the value of
`--azureblob-upload-concurrency` will increase performance at the cost
of using more memory. The default of 16 is set quite conservatively to
use less memory. It may be necessary to raise it to 64 or higher to
fully utilize a 1 GBit/s link with a single file transfer.
### Restricted filename characters
In addition to the [default restricted characters set](/overview/#restricted-characters)

View File

@@ -107,9 +107,8 @@ At the end of the non interactive process, rclone will return a result
with `State` as empty string.
If `--all` is passed then rclone will ask all the config questions,
not just the post config questions. Parameters that are supplied on
the command line or from environment variables are used as defaults
for questions as usual.
not just the post config questions. Any parameters are used as
defaults for questions as usual.
Note that `bin/config.py` in the rclone source implements this protocol
as a readable demonstration.

View File

@@ -80,7 +80,7 @@ List all the files in your pCloud
rclone ls remote:
To copy a local directory to a pCloud directory called backup
To copy a local directory to an pCloud directory called backup
rclone copy /home/source remote:backup

View File

@@ -620,7 +620,7 @@ issue](https://github.com/pkg/sftp/issues/156) is fixed.
Note that since SFTP isn't HTTP based the following flags don't work
with it: `--dump-headers`, `--dump-bodies`, `--dump-auth`
Note that `--timeout` and `--contimeout` are both supported.
Note that `--timeout` isn't supported (but `--contimeout` is).
## C14 {#c14}

View File

@@ -175,15 +175,6 @@ Leave blank to use the provider defaults.
- Type: string
- Default: ""
#### --yandex-hard-delete
Delete files permanently rather than putting them into the trash.
- Config: hard_delete
- Env Var: RCLONE_YANDEX_HARD_DELETE
- Type: bool
- Default: false
#### --yandex-encoding
This sets the encoding for the backend.

View File

@@ -6,7 +6,7 @@ import (
"errors"
"fmt"
"io"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"unicode/utf8"

View File

@@ -2,7 +2,7 @@ package accounting
import (
"context"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
)

View File

@@ -6,7 +6,7 @@ import (
"fmt"
"sort"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -2,7 +2,7 @@ package accounting
import (
"context"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs/rc"

View File

@@ -4,11 +4,11 @@ import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/sync"
"golang.org/x/time/rate"
)

View File

@@ -4,7 +4,7 @@ import (
"context"
"encoding/json"
"io"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -5,7 +5,7 @@ import (
"fmt"
"sort"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"

View File

@@ -6,7 +6,7 @@ import (
"context"
"errors"
"io"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -9,7 +9,7 @@ import (
"io/ioutil"
"math/rand"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"testing"
"testing/iotest"
"time"

2
fs/cache/cache.go vendored
View File

@@ -4,7 +4,7 @@ package cache
import (
"context"
"runtime"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"

View File

@@ -4,7 +4,7 @@ import (
"context"
"errors"
"io"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"

View File

@@ -8,7 +8,7 @@ import (
"os"
"path/filepath"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/Unknwon/goconfig"
"github.com/rclone/rclone/fs"

View File

@@ -2,7 +2,7 @@ package config
import (
"encoding/json"
"github.com/rclone/rclone/lib/sync"
"sync"
)
// defaultStorage implements config.Storage, providing in-memory config.

View File

@@ -6,7 +6,7 @@ import (
"io/ioutil"
"os"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"testing"
"time"

View File

@@ -7,7 +7,7 @@ import (
"fmt"
"os"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"testing"
"time"

View File

@@ -5,7 +5,7 @@ import (
"net"
"runtime"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -12,7 +12,7 @@ import (
"net/http"
"net/http/cookiejar"
"net/http/httputil"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -7,7 +7,7 @@ import (
"path"
"sort"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"

View File

@@ -7,7 +7,7 @@ import (
"errors"
"fmt"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"testing"
_ "github.com/rclone/rclone/backend/local"

View File

@@ -10,7 +10,7 @@ import (
"os"
"regexp"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"github.com/rclone/rclone/fs"

View File

@@ -18,7 +18,7 @@ import (
"sort"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"time"
@@ -405,7 +405,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
if err == nil {
dst = newDst
in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server-side transfer
_ = in.Close()
err = in.Close()
} else {
_ = in.Close()
}
@@ -598,8 +598,6 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
}
}
// Move dst <- src
in := tr.Account(ctx, nil) // account the transfer
in.ServerSideCopyStart()
newDst, err = doMove(ctx, src, remote)
switch err {
case nil:
@@ -608,16 +606,13 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
} else {
fs.Infof(src, "Moved (server-side)")
}
in.ServerSideCopyEnd(newDst.Size()) // account the bytes for the server-side transfer
_ = in.Close()
return newDst, nil
case fs.ErrorCantMove:
fs.Debugf(src, "Can't move, switching to copy")
_ = in.Close()
default:
err = fs.CountError(err)
fs.Errorf(src, "Couldn't move: %v", err)
_ = in.Close()
return newDst, err
}
}

View File

@@ -4,7 +4,7 @@ import (
"context"
"errors"
"io"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"

View File

@@ -7,7 +7,7 @@ import (
"errors"
"fmt"
"runtime/debug"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"time"

View File

@@ -15,7 +15,7 @@ import (
"regexp"
"sort"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"

View File

@@ -6,7 +6,7 @@ import (
"context"
"sort"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
)
// Func defines a type for a remote control function

View File

@@ -12,7 +12,7 @@ import (
"path/filepath"
"regexp"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"

View File

@@ -6,7 +6,7 @@ import (
"math/bits"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"github.com/aalpar/deheap"
"github.com/rclone/rclone/fs"

View File

@@ -3,7 +3,7 @@ package sync
import (
"container/heap"
"context"
"github.com/rclone/rclone/lib/sync"
"sync"
"sync/atomic"
"testing"

View File

@@ -8,7 +8,7 @@ import (
"path"
"sort"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -8,7 +8,7 @@ import (
"path"
"sort"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -6,7 +6,7 @@ import (
"fmt"
"io"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"testing"
"github.com/rclone/rclone/fs"

View File

@@ -441,10 +441,6 @@ func Run(t *testing.T, opt *Opt) {
}
require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
// Get fsInfo which contains type, etc. of the fs
fsInfo, _, _, _, err := fs.ConfigFs(subRemoteName)
require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err))
// Skip the rest if it failed
skipIfNotOk(t)
@@ -1591,30 +1587,12 @@ func Run(t *testing.T, opt *Opt) {
t.Run("PublicLink", func(t *testing.T) {
skipIfNotOk(t)
publicLinkFunc := f.Features().PublicLink
if publicLinkFunc == nil {
doPublicLink := f.Features().PublicLink
if doPublicLink == nil {
t.Skip("FS has no PublicLinker interface")
}
type PublicLinkFunc func(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error)
wrapPublicLinkFunc := func(f PublicLinkFunc) PublicLinkFunc {
return func(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
link, err = publicLinkFunc(ctx, remote, expire, unlink)
if err == nil {
return
}
// For OneDrive Personal, link expiry is a premium feature
// Don't let it fail the test (https://github.com/rclone/rclone/issues/5420)
if fsInfo.Name == "onedrive" && strings.Contains(err.Error(), "accountUpgradeRequired") {
t.Log("treating accountUpgradeRequired as success for PublicLink")
link, err = "bogus link to "+remote, nil
}
return
}
}
expiry := fs.Duration(60 * time.Second)
doPublicLink := wrapPublicLinkFunc(publicLinkFunc)
// if object not found
link, err := doPublicLink(ctx, file1.Path+"_does_not_exist", expiry, false)
@@ -1661,7 +1639,7 @@ func Run(t *testing.T, opt *Opt) {
_, err = subRemote.Put(ctx, buf, obji)
require.NoError(t, err)
link4, err := wrapPublicLinkFunc(subRemote.Features().PublicLink)(ctx, "", expiry, false)
link4, err := subRemote.Features().PublicLink(ctx, "", expiry, false)
require.NoError(t, err, "Sharing root in a sub-remote should work")
require.NotEqual(t, "", link4, "Link should not be empty")
}

View File

@@ -17,7 +17,7 @@ import (
"sort"
"strconv"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

View File

@@ -11,7 +11,7 @@ import (
"path/filepath"
"regexp"
"strings"
"github.com/rclone/rclone/lib/sync"
"sync"
"time"
"github.com/rclone/rclone/fs"

4
go.mod
View File

@@ -10,7 +10,7 @@ require (
github.com/Azure/azure-storage-blob-go v0.14.0
github.com/Azure/go-autorest/autorest/adal v0.9.17
github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c
github.com/Max-Sum/base32768 v0.0.0-20191205131208-7937843c71d5
github.com/Max-Sum/base32768 v0.0.0-20191205131208-7937843c71d5 // indirect
github.com/Unknwon/goconfig v0.0.0-20200908083735-df7de6a44db8
github.com/a8m/tree v0.0.0-20210414114729-ce3525c5c2ef
github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3
@@ -49,7 +49,6 @@ require (
github.com/prometheus/client_golang v1.11.0
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
github.com/rfjakob/eme v1.1.2
github.com/sasha-s/go-deadlock v0.3.1
github.com/shirou/gopsutil/v3 v3.21.10
github.com/sirupsen/logrus v1.8.1
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
@@ -109,7 +108,6 @@ require (
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect

4
go.sum
View File

@@ -502,8 +502,6 @@ github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwp
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU=
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -557,8 +555,6 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil/v3 v3.21.10 h1:flTg1DrnV/UVrBqjLgVgDJzx6lf+91rC64/dBHmO2IA=

Some files were not shown because too many files have changed in this diff Show More