mirror of https://github.com/rclone/rclone.git synced 2025-12-31 15:43:53 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Nick Craig-Wood
f499c625bc docker: make docker volume accept all global options
Fixes #8401
2025-09-09 16:27:04 +01:00
64 changed files with 170 additions and 554 deletions

View File

@@ -106,7 +106,6 @@ directories to and from different cloud storage providers.
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
-- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)

View File

@@ -2152,6 +2152,7 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
}
metadata = make(map[string]*string, len(o.meta))
for k, v := range o.meta {
+v := v
metadata[k] = &v
}
return metadata
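The `v := v` line added here matters on Go versions before 1.22, where the range statement reused a single loop variable for every iteration: taking `&v` without first copying it would make every entry of `metadata` point at the same address. A minimal standalone sketch (plain Go, not rclone code) of the failure mode being guarded against:

```go
package main

import "fmt"

func main() {
	src := map[string]string{"a": "1", "b": "2"}
	out := make(map[string]*string, len(src))
	for k, v := range src {
		v := v // essential before Go 1.22: point at a fresh copy
		out[k] = &v
	}
	// Without the re-declaration, a pre-1.22 compiler reuses one v,
	// so every stored pointer would alias the last value iterated.
	for k, p := range out {
		fmt.Println(k, *p)
	}
}
```

From Go 1.22 onward each iteration gets a fresh `v`, which is why newer code can drop the re-declaration.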

View File

@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
-for part := range strings.SplitSeq(ranges, ",") {
+for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
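`strings.SplitSeq` was added in Go 1.24 and returns a lazy iterator (`iter.Seq[string]`) instead of the allocated `[]string` that `strings.Split` returns, hence the change in loop form (`for part := range ...` vs `for _, part := range ...`). A small self-contained sketch contrasting the two:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const ranges = "0:99,100:199,200:"

	// Go 1.24+: lazy iterator, no intermediate slice allocation.
	for part := range strings.SplitSeq(ranges, ",") {
		fmt.Println("seq:", part)
	}

	// Portable form for older Go: allocates a []string up front.
	for _, part := range strings.Split(ranges, ",") {
		fmt.Println("split:", part)
	}
}
```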

View File

@@ -187,6 +187,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
+upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {
@@ -369,6 +370,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
+u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})
@@ -635,6 +637,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var uChans []chan time.Duration
for _, u := range f.upstreams {
+u := u
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)

View File

@@ -598,7 +598,7 @@ It doesn't return anything.
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
@@ -625,7 +625,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
// ShowMetadata returns some metadata about the corresponding DOI
-func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
+func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err
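`any` has been a built-in alias for `interface{}` since Go 1.18, so swaps like this one change spelling only, never behavior. A tiny sketch illustrating the equivalence:

```go
package main

import "fmt"

// Since Go 1.18: type any = interface{}. The two signatures below are
// identical to the compiler; only the spelling differs.
func describeAny(v any) string           { return fmt.Sprintf("%T", v) }
func describeIface(v interface{}) string { return fmt.Sprintf("%T", v) }

func main() {
	fmt.Println(describeAny(42), describeIface("hi"))
}
```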

View File

@@ -18,7 +18,7 @@ type headerLink struct {
}
func parseLinkHeader(header string) (links []headerLink) {
-for link := range strings.SplitSeq(header, ",") {
+for _, link := range strings.Split(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {
@@ -30,7 +30,7 @@ func parseLinkHeader(header string) (links []headerLink) {
func parseLink(link string) (parsedLink *headerLink) {
var parts []string
-for part := range strings.SplitSeq(link, ";") {
+for _, part := range strings.Split(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}

View File

@@ -191,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
-for scope := range strings.SplitSeq(scopesString, ",") {
+for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
for _, extensionText := range extensionsIn {
-for extension := range strings.SplitSeq(extensionText, ",") {
+for _, extension := range strings.Split(extensionText, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if extension == "" {
continue

View File

@@ -386,6 +386,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
+permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
@@ -519,6 +520,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
// merge metadata into request and user metadata
for k, v := range meta {
+k, v := k, v
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)

View File

@@ -8,7 +8,7 @@ type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
-FldID any `json:"fld_id"`
+FldID interface{} `json:"fld_id"`
} `json:"result"`
}

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
iofs "io/fs"
"os"
"path"
"path/filepath"
@@ -842,13 +841,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
-err := os.Remove(localPath)
-if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
-if os.Chmod(localPath, 0o600) == nil {
-err = os.Remove(localPath)
-}
-}
-return err
+return os.Remove(localPath)
}
// Precision of the file system
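The deleted branch works around golang/go#26295: on Windows, `os.Remove` fails with a permission error on read-only files, so the code clears the read-only bit and retries once. A standalone sketch of that retry pattern (the `removeFile` helper name is invented, not the rclone function):

```go
package main

import (
	"errors"
	"fmt"
	iofs "io/fs"
	"os"
	"runtime"
)

// removeFile deletes path, retrying once on Windows after clearing the
// read-only attribute, which otherwise makes os.Remove fail with a
// permission error (see golang/go#26295).
func removeFile(path string) error {
	err := os.Remove(path)
	if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) {
		if os.Chmod(path, 0o600) == nil {
			err = os.Remove(path)
		}
	}
	return err
}

func main() {
	fmt.Println(removeFile("testfile"))
}
```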

View File

@@ -334,7 +334,7 @@ func TestMetadata(t *testing.T) {
func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
ctx := context.Background()
-whenRFC := when.Local().Format(time.RFC3339Nano)
+whenRFC := when.Format(time.RFC3339Nano)
const dayLength = len("2001-01-01")
f := r.Flocal.(*Fs)

View File

@@ -1,40 +0,0 @@
//go:build windows
package local
import (
"context"
"path/filepath"
"runtime"
"syscall"
"testing"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
// Microsoft docs indicate that "This attribute is not honored on directories."
// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
// and https://github.com/golang/go/issues/26295
func TestRmdirWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skipf("windows only")
}
r := fstest.NewRun(t)
defer r.Finalise()
err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
require.NoError(t, err)
ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
require.NoError(t, err)
err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
require.NoError(t, err)
err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
assert.NoError(t, err)
}

View File

@@ -400,7 +400,7 @@ type quirks struct {
}
func (q *quirks) parseQuirks(option string) {
-for flag := range strings.SplitSeq(option, ",") {
+for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
f.speedupAny = false
uniqueValidPatterns := make(map[string]any)
-for pattern := range strings.SplitSeq(patternString, ",") {
+for _, pattern := range strings.Split(patternString, ",") {
pattern = strings.ToLower(strings.TrimSpace(pattern))
if pattern == "" {
continue

View File

@@ -243,6 +243,7 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
numSet = 0
for k, v := range metadata {
+k, v := k, v
switch k {
case "mtime":
t, err := time.Parse(timeFormatIn, v)
@@ -421,7 +422,12 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
return true
}
-return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
+for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
+if hasUserIdentity(identity) {
+return true
+}
+}
+return false
}
// Put Permissions with a user first, leaving unsorted otherwise
slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
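`slices.ContainsFunc` (Go 1.21) and the expanded loop are exact equivalents; the one-liner simply reports whether any element satisfies the predicate. A self-contained sketch with invented permission strings:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	perms := []string{"group:editors", "user:alice", "link:anyone"}
	isUser := func(p string) bool { return strings.HasPrefix(p, "user:") }

	// Go 1.21+: one call replaces the loop.
	fmt.Println(slices.ContainsFunc(perms, isUser)) // true

	// Equivalent manual loop for older Go versions.
	found := false
	for _, p := range perms {
		if isUser(p) {
			found = true
			break
		}
	}
	fmt.Println(found) // true
}
```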

View File

@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
require.NoError(b, err)
require.Equal(b, len(buf), n)
h := New()
-for b.Loop() {
+b.ResetTimer()
+for i := 0; i < b.N; i++ {
h.Reset()
h.Write(buf)
h.Sum(nil)
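`b.Loop()` arrived in the `testing` package in Go 1.24; it drives the iteration count and timer itself, replacing the classic `b.ResetTimer()` plus `for i := 0; i < b.N; i++` idiom restored here. A minimal sketch of both spellings (hypothetical benchmarks using `crypto/sha256` as the workload):

```go
package bench // hypothetical _test.go file for illustration

import (
	"crypto/sha256"
	"testing"
)

// Go 1.24+ style: b.Loop manages iterations and keeps setup done
// before the loop out of the measurement automatically.
func BenchmarkHashLoop(b *testing.B) {
	buf := make([]byte, 1024)
	for b.Loop() {
		sha256.Sum256(buf)
	}
}

// Pre-1.24 style: reset the timer after setup, then run b.N times.
func BenchmarkHashN(b *testing.B) {
	buf := make([]byte, 1024)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sha256.Sum256(buf)
	}
}
```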

View File

@@ -59,7 +59,11 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
defer func() { u.fileUsage[fileID] = borrowed }()
-effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))
+effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
+if neededMemory < effectiveChunkSize {
+effectiveChunkSize = neededMemory
+}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize
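The built-in `min` and `max` functions (Go 1.21) let the clamp collapse into one expression; the expanded form computes the same value on older toolchains. A sketch with made-up sizes:

```go
package main

import "fmt"

func main() {
	// Hypothetical values for illustration.
	var neededMemory, speedBased, reserved int64 = 1 << 20, 4 << 20, 2 << 20

	// Go 1.21+ builtins collapse the clamp into one expression.
	oneLiner := min(neededMemory, max(speedBased, reserved))

	// Equivalent explicit form for older Go versions.
	expanded := speedBased
	if reserved > expanded {
		expanded = reserved
	}
	if neededMemory < expanded {
		expanded = neededMemory
	}

	fmt.Println(oneLiner, expanded, oneLiner == expanded)
}
```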

View File

@@ -173,9 +173,6 @@ var providerOption = fs.Option{
}, {
Value: "Selectel",
Help: "Selectel Object Storage",
-}, {
-Value: "SpectraLogic",
-Help: "Spectra Logic Black Pearl",
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
@@ -657,7 +654,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,Intercolo,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,SpectraLogic,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,Intercolo,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -2089,7 +2086,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,Intercolo,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,SpectraLogic,StackPath,Storj,TencentCOS,Petabox,Mega",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,Intercolo,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -2104,7 +2101,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
Provider: "!Storj,Selectel,SpectraLogic,Synology,Cloudflare,FlashBlade,Mega",
Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade,Mega",
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -2162,7 +2159,7 @@ isn't set then "acl" is used instead.
If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
header is added and the default (private) will be used.
`,
Provider: "!Storj,Selectel,SpectraLogic,Synology,Cloudflare,FlashBlade",
Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
@@ -3774,8 +3771,6 @@ func setQuirks(opt *Options) {
urlEncodeListings = false
useMultipartEtag = false // untested
useAlreadyExists = false // untested
case "SpectraLogic":
virtualHostStyle = false // path-style required
case "StackPath":
listObjectsV2 = false // untested
virtualHostStyle = false
@@ -6247,8 +6242,8 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
metaData := make(map[string]string)
for key, value := range resp.Header {
key = strings.ToLower(key)
-if after, ok := strings.CutPrefix(key, "x-amz-meta-"); ok {
-metaKey := after
+if strings.HasPrefix(key, "x-amz-meta-") {
+metaKey := strings.TrimPrefix(key, "x-amz-meta-")
metaData[metaKey] = value[0]
}
}
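`strings.CutPrefix` (Go 1.20) merges the `HasPrefix` test and the `TrimPrefix` strip into a single call, which is all this hunk trades away. A small sketch of the equivalence:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	key := "x-amz-meta-mtime"

	// Go 1.20+: test and strip the prefix in one call.
	if after, ok := strings.CutPrefix(key, "x-amz-meta-"); ok {
		fmt.Println("meta key:", after)
	}

	// Equivalent two-call form for older Go versions.
	if strings.HasPrefix(key, "x-amz-meta-") {
		fmt.Println("meta key:", strings.TrimPrefix(key, "x-amz-meta-"))
	}
}
```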

View File

@@ -200,7 +200,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
pool := newFilePool(ctx, fs, "testshare", "/test/path")
const numGoroutines = 10
-for range numGoroutines {
+for i := 0; i < numGoroutines; i++ {
mockFile := newMockFile()
pool.pool = append(pool.pool, mockFile)
}
@@ -208,7 +208,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
// Test concurrent get operations
done := make(chan bool, numGoroutines)
-for range numGoroutines {
+for i := 0; i < numGoroutines; i++ {
go func() {
defer func() { done <- true }()
@@ -219,7 +219,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
}()
}
-for range numGoroutines {
+for i := 0; i < numGoroutines; i++ {
<-done
}
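Ranging over an integer (`for range numGoroutines`) is Go 1.22 syntax; the counting loop is the portable spelling. A self-contained sketch mirroring this test's fan-out/fan-in shape:

```go
package main

import "fmt"

func main() {
	const numGoroutines = 10
	done := make(chan bool, numGoroutines)

	// Go 1.22+: range over an int when the index itself is unused.
	for range numGoroutines {
		go func() { done <- true }()
	}

	// Pre-1.22 spelling of the same loop, draining the channel.
	for i := 0; i < numGoroutines; i++ {
		<-done
	}
	fmt.Println("all goroutines finished")
}
```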

View File

@@ -21,6 +21,7 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
ctx, cancel := context.WithCancel(ctx)
defer cancel()
for _, u := range upstreams {
+u := u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)

View File

@@ -123,7 +123,7 @@ func (p *Prop) Hashes() (hashes map[hash.Type]string) {
hashes = make(map[hash.Type]string)
for _, checksums := range p.Checksums {
checksums = strings.ToLower(checksums)
-for checksum := range strings.SplitSeq(checksums, " ") {
+for _, checksum := range strings.Split(checksums, " ") {
switch {
case strings.HasPrefix(checksum, "sha1:"):
hashes[hash.SHA1] = checksum[5:]

View File

@@ -33,7 +33,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
}
logMap = map[string]string{}
logs = []string{}
-for line := range bytes.SplitSeq(out, []byte{'\n'}) {
+for _, line := range bytes.Split(out, []byte{'\n'}) {
if len(line) == 0 {
continue
}

View File

@@ -522,7 +522,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
require.NoError(b.t, err)
b.step = 0
b.stopped = false
-for line := range strings.SplitSeq(string(scenBuf), "\n") {
+for _, line := range strings.Split(string(scenBuf), "\n") {
comment := strings.Index(line, "#")
if comment != -1 {
line = line[:comment]
@@ -936,7 +936,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
// splitLine splits scenario line into tokens and performs
// substitutions that involve whitespace or control chars.
func splitLine(line string) (args []string) {
-for s := range strings.FieldsSeq(line) {
+for _, s := range strings.Fields(line) {
b := []byte(whitespaceReplacer.Replace(s))
b = regexChar.ReplaceAllFunc(b, func(b []byte) []byte {
c, _ := strconv.ParseUint(string(b[5:7]), 16, 8)
@@ -1513,7 +1513,7 @@ func (b *bisyncTest) compareResults() int {
fs.Log(nil, divider)
fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
-for line := range strings.SplitSeq(strings.TrimSpace(text), "\n") {
+for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
fs.Logf(nil, "| %s", strings.TrimSpace(line))
}
}

View File

@@ -219,8 +219,8 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
return nil
}
var CompareFlag CompareOpt // for exclusions
-opts := strings.SplitSeq(b.opt.CompareFlag, ",")
-for opt := range opts {
+opts := strings.Split(b.opt.CompareFlag, ",")
+for _, opt := range opts {
switch strings.ToLower(strings.TrimSpace(opt)) {
case "size":
b.opt.Compare.Size = true

View File

@@ -434,6 +434,7 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
}
fulllisting, err = b.loadListingNum(listingNum)
if err != nil {
b.critical = true
b.retryable = true
@@ -609,11 +610,6 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
}
}
if srcNewName != "" { // if it was renamed and not deleted
-if new == nil { // should not happen. log error and debug info
-b.handleErr(b.renames, "internal error", fmt.Errorf("missing info for %q. Please report a bug at https://github.com/rclone/rclone/issues", srcNewName), true, true)
-fs.PrettyPrint(srcList, "srcList for debugging", fs.LogLevelNotice)
-continue
-}
srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
}

View File

@@ -152,7 +152,7 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
items := []fstest.Item{}
for _, c := range alphabet {
var out strings.Builder
-for i := range rune(7) {
+for i := rune(0); i < 7; i++ {
out.WriteRune(c + i)
}
fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))

View File

@@ -3,7 +3,6 @@ package copyurl
import (
"context"
"encoding/csv"
"errors"
"fmt"
"os"
@@ -13,9 +12,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/errcount"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
var (
@@ -24,7 +21,6 @@ var (
printFilename = false
stdout = false
noClobber = false
-urls = false
)
func init() {
@@ -35,7 +31,6 @@ func init() {
flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename", "")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name", "")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file", "")
flags.BoolVarP(cmdFlags, &urls, "urls", "", stdout, "Use a CSV file of links to process multiple URLs", "")
}
var commandDefinition = &cobra.Command{
@@ -59,17 +54,6 @@ destination if there is one with the same name.
Setting |--stdout| or making the output file name |-|
will cause the output to be written to standard output.
-Setting |--urls| allows you to input a CSV file of URLs in format: URL,
-FILENAME. If |--urls| is in use then replace the URL in the arguments with the
-file containing the URLs, e.g.:
-|||sh
-rclone copyurl --urls myurls.csv remote:dir
-|||
-Missing filenames will be autogenerated equivalent to using |--auto-filename|.
-Note that |--stdout| and |--print-filename| are incompatible with |--urls|.
-This will do |--transfers| copies in parallel. Note that if |--auto-filename|
-is desired for all URLs then a file with only URLs and no filename can be used.
### Troubleshooting
If you can't get |rclone copyurl| to work then here are some things you can try:
@@ -86,93 +70,32 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
RunE: func(command *cobra.Command, args []string) (err error) {
cmd.CheckArgs(1, 2, command, args)
-cmd.Run(true, true, command, func() error {
-if !urls {
-return run(args)
+var dstFileName string
+var fsdst fs.Fs
+if !stdout {
+if len(args) < 2 {
+return errors.New("need 2 arguments if not using --stdout")
}
-return runURLS(args)
+if args[1] == "-" {
+stdout = true
+} else if autoFilename {
+fsdst = cmd.NewFsDir(args[1:])
+} else {
+fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
+}
+}
+cmd.Run(true, true, command, func() error {
+var dst fs.Object
+if stdout {
+err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
+} else {
+dst, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber)
+if printFilename && err == nil && dst != nil {
+fmt.Println(dst.Remote())
+}
+}
+return err
})
return nil
},
}
-var copyURL = operations.CopyURL // for testing
-// runURLS processes a .csv file of urls and filenames
-func runURLS(args []string) (err error) {
-if stdout {
-return errors.New("can't use --stdout with --urls")
-}
-if printFilename {
-return errors.New("can't use --print-filename with --urls")
-}
-dstFs := cmd.NewFsDir(args[1:])
-f, err := os.Open(args[0])
-if err != nil {
-return fmt.Errorf("failed to open .csv file: %w", err)
-}
-defer fs.CheckClose(f, &err)
-reader := csv.NewReader(f)
-reader.FieldsPerRecord = -1
-urlList, err := reader.ReadAll()
-if err != nil {
-return fmt.Errorf("failed reading .csv file: %w", err)
-}
-ec := errcount.New()
-g, gCtx := errgroup.WithContext(context.Background())
-ci := fs.GetConfig(gCtx)
-g.SetLimit(ci.Transfers)
-for _, urlEntry := range urlList {
-if len(urlEntry) == 0 {
-continue
-}
-g.Go(func() error {
-url := urlEntry[0]
-var filename string
-if len(urlEntry) > 1 {
-filename = urlEntry[1]
-}
-_, err := copyURL(gCtx, dstFs, filename, url, filename == "", headerFilename, noClobber)
-if err != nil {
-fs.Errorf(filename, "failed to copy URL %q: %v", url, err)
-ec.Add(err)
-}
-return nil
-})
-}
-ec.Add(g.Wait())
-return ec.Err("not all URLs copied successfully")
-}
-// run runs the command for a single URL
-func run(args []string) error {
-var err error
-var dstFileName string
-var fsdst fs.Fs
-if !stdout {
-if len(args) < 2 {
-return errors.New("need 2 arguments if not using --stdout")
-}
-if args[1] == "-" {
-stdout = true
-} else if autoFilename {
-fsdst = cmd.NewFsDir(args[1:])
-} else {
-fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
-}
-}
-var dst fs.Object
-if stdout {
-err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
-} else {
-dst, err = copyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber)
-if printFilename && err == nil && dst != nil {
-fmt.Println(dst.Remote())
-}
-}
-return err
-}
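The removed `runURLS` illustrates a reusable shape: bound the number of concurrent copies with `errgroup.SetLimit`, log per-item failures, and aggregate them instead of cancelling the whole group. A standalone sketch of that pattern (the `fetch` helper and URLs are invented, and rclone's `errcount` is replaced by a plain atomic counter):

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

// fetch is a hypothetical stand-in for the per-URL copy operation.
func fetch(ctx context.Context, url string) error {
	if url == "https://example.com/c" {
		return fmt.Errorf("fetch %s: network down", url)
	}
	return nil
}

func main() {
	urls := []string{"https://example.com/a", "https://example.com/b", "https://example.com/c"}

	g, gCtx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // bound concurrency, as runURLS does with --transfers

	var failed atomic.Int32
	for _, url := range urls {
		url := url // capture for pre-Go 1.22 toolchains
		g.Go(func() error {
			if err := fetch(gCtx, url); err != nil {
				fmt.Println(err) // log and collect instead of cancelling the group
				failed.Add(1)
			}
			return nil // always nil so one bad URL doesn't stop the rest
		})
	}
	_ = g.Wait()
	if n := failed.Load(); n > 0 {
		fmt.Printf("%d of %d URLs failed\n", n, len(urls))
	}
}
```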

View File

@@ -1,157 +0,0 @@
package copyurl
import (
"context"
"errors"
"os"
"path/filepath"
"sync"
"sync/atomic"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func resetGlobals() {
autoFilename = false
headerFilename = false
printFilename = false
stdout = false
noClobber = false
urls = false
copyURL = operations.CopyURL
}
func TestRun_RequiresTwoArgsWhenNotStdout(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()
err := run([]string{"https://example.com/foo"})
require.Error(t, err)
assert.Contains(t, err.Error(), "need 2 arguments if not using --stdout")
}
func TestRun_CallsCopyURL_WithExplicitFilename_Success(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()
tmp := t.TempDir()
dstPath := filepath.Join(tmp, "out.txt")
var called int32
copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
atomic.AddInt32(&called, 1)
assert.Equal(t, "https://example.com/file", url)
assert.Equal(t, "out.txt", dstFileName)
assert.False(t, auto)
assert.False(t, header)
assert.False(t, noclobber)
return nil, nil
}
err := run([]string{"https://example.com/file", dstPath})
require.NoError(t, err)
assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}
func TestRun_CallsCopyURL_WithAutoFilename_AndPropagatesError(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()
tmp := t.TempDir()
autoFilename = true
want := errors.New("boom")
var called int32
copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
atomic.AddInt32(&called, 1)
assert.Equal(t, "", dstFileName) // auto filename -> empty
assert.True(t, auto)
return nil, want
}
err := run([]string{"https://example.com/auto/name", tmp})
require.Error(t, err)
assert.Equal(t, want, err)
assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}
func TestRunURLS_ErrorsWithStdoutAndWithPrintFilename(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()
stdout = true
err := runURLS([]string{"dummy.csv", "destDir"})
require.Error(t, err)
assert.Contains(t, err.Error(), "can't use --stdout with --urls")
resetGlobals()
printFilename = true
err = runURLS([]string{"dummy.csv", "destDir"})
require.Error(t, err)
assert.Contains(t, err.Error(), "can't use --print-filename with --urls")
}
func TestRunURLS_ProcessesCSV_ParallelCalls_AndAggregatesError(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()
tmp := t.TempDir()
csvPath := filepath.Join(tmp, "urls.csv")
csvContent := []byte(
"https://example.com/a,aaa.txt\n" + // success
"https://example.com/b\n" + // auto filename
"https://example.com/c,ccc.txt\n") // error
require.NoError(t, os.WriteFile(csvPath, csvContent, 0o600))
// destination dir (local backend)
dest := t.TempDir()
// mock copyURL: succeed for /a and /b, fail for /c
var calls int32
var mu sync.Mutex
var seen []string
copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
atomic.AddInt32(&calls, 1)
mu.Lock()
seen = append(seen, url+"|"+dstFileName)
mu.Unlock()
switch {
case url == "https://example.com/a":
require.Equal(t, "aaa.txt", dstFileName)
return nil, nil
case url == "https://example.com/b":
require.Equal(t, "", dstFileName) // auto-name path
return nil, nil
case url == "https://example.com/c":
return nil, errors.New("network down")
default:
return nil, nil
}
}
err := runURLS([]string{csvPath, dest})
require.Error(t, err)
assert.Contains(t, err.Error(), "not all URLs copied successfully")
// 3 lines => 3 calls
assert.Equal(t, int32(3), atomic.LoadInt32(&calls))
// sanity: all expected URLs were seen
assert.ElementsMatch(t,
[]string{
"https://example.com/a|aaa.txt",
"https://example.com/b|",
"https://example.com/c|ccc.txt",
},
seen,
)
}

View File

@@ -229,6 +229,7 @@ func TestEndToEnd(t *testing.T) {
skipE2eTestIfNecessary(t)
for _, mode := range allLayoutModes() {
+mode := mode
t.Run(string(mode), func(t *testing.T) {
t.Parallel()
@@ -257,6 +258,7 @@ func TestEndToEndMigration(t *testing.T) {
}
for _, mode := range allLayoutModes() {
+mode := mode
t.Run(string(mode), func(t *testing.T) {
t.Parallel()
@@ -316,6 +318,7 @@ func TestEndToEndRepoLayoutCompat(t *testing.T) {
}
for _, mode := range allLayoutModes() {
+mode := mode
t.Run(string(mode), func(t *testing.T) {
t.Parallel()

View File

@@ -344,7 +344,7 @@ func showBackend(name string) {
}
for _, ex := range opt.Examples {
fmt.Printf(" - %s\n", quoteString(ex.Value))
-for line := range strings.SplitSeq(ex.Help, "\n") {
+for _, line := range strings.Split(ex.Help, "\n") {
fmt.Printf(" - %s\n", line)
}
}

View File

@@ -1,6 +1,7 @@
package docker
import (
"context"
"fmt"
"strings"
@@ -112,6 +113,7 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
}
mntMap := configmap.Simple{}
vfsMap := configmap.Simple{}
+globalMap := configmap.Simple{}
for key := range opt {
var ok bool
var err error
@@ -144,6 +146,13 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
ok = true
}
}
+if !ok {
+// try as a global option in globalMap
+if fs.ConfigOptionsInfo.Get(underscoreKey) != nil {
+globalMap[underscoreKey] = vol.Options[key]
+ok = true
+}
+}
if !ok {
// try as a backend option in fsOpt (backends use "_" instead of "-")
@@ -172,6 +181,20 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
return fmt.Errorf("cannot parse mount options: %w", err)
}
+// Parse Global options
+if len(globalMap) > 0 {
+ctx := context.Background()
+ci := fs.GetConfig(ctx)
+err = configstruct.Set(globalMap, ci)
+if err != nil {
+return fmt.Errorf("cannot parse global options: %w", err)
+}
+err = ci.Reload(ctx)
+if err != nil {
+return fmt.Errorf("failed to reload global options: %w", err)
+}
+}
// build remote string from fsName, fsType, fsOpt, fsPath
colon := ":"
comma := ","

View File

@@ -66,6 +66,7 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
func testCacheThrashDifferent(t *testing.T, h *Handler, c Cache) {
var wg sync.WaitGroup
for i := range 100 {
+i := i
wg.Add(1)
go func() {
defer wg.Done()
@@ -124,6 +125,7 @@ func TestCache(t *testing.T) {
}()
billyFS := &FS{nil} // place holder billyFS
for _, cacheType := range []handleCache{cacheMemory, cacheDisk, cacheSymlink} {
+cacheType := cacheType
t.Run(cacheType.String(), func(t *testing.T) {
h := &Handler{
vfs: vfs.New(object.MemoryFs, nil),

View File

@@ -182,7 +182,7 @@ func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
// Obscure any values in the config map that need it
obscureFields, ok := config.Get("_obscure")
if ok {
-for key := range strings.SplitSeq(obscureFields, ",") {
+for _, key := range strings.Split(obscureFields, ",") {
value, ok := config.Get(key)
if ok {
obscuredValue, err := obscure.Obscure(value)

View File

@@ -34,7 +34,7 @@ func (r *results) checkBase32768() {
// Create test files
for _, c := range safeAlphabet {
var out strings.Builder
-for i := range rune(32) {
+for i := rune(0); i < 32; i++ {
out.WriteRune(c + i)
}
fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))

View File

@@ -292,7 +292,7 @@ func (r *results) checkControls() {
tokens <- struct{}{}
}
var wg sync.WaitGroup
-for i := range rune(128) {
+for i := rune(0); i < 128; i++ {
s := string(i)
if i == 0 || i == '/' {
// We're not even going to check NULL or /

View File

@@ -95,7 +95,7 @@ func (e *Position) UnmarshalText(text []byte) error {
switch s := strings.ToLower(string(text)); s {
default:
*e = PositionNone
-for p := range strings.SplitSeq(s, ",") {
+for _, p := range strings.Split(s, ",") {
switch p {
case "left":
*e |= PositionLeft

View File

@@ -351,7 +351,7 @@ func TestEnvironmentVariables(t *testing.T) {
parseFileFilters := func(out string) (extensions []string) {
// Match: - (^|/)[^/]*\.jpg$
find := regexp.MustCompile(`^- \(\^\|\/\)\[\^\/\]\*\\\.(.*?)\$$`)
-for line := range strings.SplitSeq(out, "\n") {
+for _, line := range strings.Split(out, "\n") {
if m := find.FindStringSubmatch(line); m != nil {
extensions = append(extensions, m[1])
}

View File

@@ -189,7 +189,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
{{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
{{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}}
{{< provider name="Synology" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}

View File

@@ -990,7 +990,7 @@ put them back in again.` >}}
- Ross Smith II <ross@smithii.com>
- Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com>
- Sudipto Baral <sudiptobaral.me@gmail.com>
- Sam Pegg <samrpegg@gmail.com> <70067376+S-Pegg1@users.noreply.github.com>
- Sam Pegg <samrpegg@gmail.com>
- liubingrun <liubr1@chinatelecom.cn>
- Albin Parou <fumesover@gmail.com>
- n4n5 <56606507+Its-Just-Nans@users.noreply.github.com>
@@ -1011,5 +1011,3 @@ put them back in again.` >}}
- dougal <dougal.craigwood@gmail.com> <147946567+roucc@users.noreply.github.com>
- anon-pradip <pradipsubedi360@gmail.com>
- Robin Rolf <imer@imer.cc>
-- Jean-Christophe Cura <jcaspes@gmail.com>
-- russcoss <russcoss@outlook.com>

View File

@@ -257,6 +257,5 @@ Properties:
## Limitations
- Erasure coding not supported, see [issue #8808](https://github.com/rclone/rclone/issues/8808)
- No server-side `Move` or `DirMove`.
- Checksums not implemented.

View File

@@ -152,23 +152,14 @@ However you can set this to restrict rclone to a specific folder
hierarchy.
In order to do this you will have to find the `Folder ID` of the
-directory you wish rclone to display. This can be accomplished by executing
-the ```rclone lsf``` command using a basic configuration setup that does not
-include the ```root_folder_id``` parameter.
+directory you wish rclone to display. This will be the `folder` field
+of the URL when you open the relevant folder in the pCloud web
+interface.
-The command will enumerate available directories, allowing you to locate the
-appropriate Folder ID for subsequent use.
-Example:
-```
-$ rclone lsf --dirs-only -Fip --csv TestPcloud:
-dxxxxxxxx2,My Music/
-dxxxxxxxx3,My Pictures/
-dxxxxxxxx4,My Videos/
-```
-So if the folder you want rclone to use is "My Music/", then use the id returned by the ```rclone lsf``` command (e.g. `dxxxxxxxx2`) as
-the `root_folder_id` variable value in the config file.
+So if the folder you want rclone to use has a URL which looks like
+`https://my.pcloud.com/#page=filemanager&folder=5xxxxxxxx8&tpl=foldergrid`
+in the browser, then you use `5xxxxxxxx8` as
+the `root_folder_id` in the config.
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/pcloud/pcloud.go then run make backenddocs" >}}
### Standard options

View File

@@ -44,7 +44,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
{{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway" config="/s3/#spectralogic" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Storj" home="https://storj.io/" config="/s3/#storj" >}}
{{< provider name="Synology C2 Object Storage" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
@@ -6019,105 +6018,6 @@ region = ru-1
endpoint = s3.ru-1.storage.selcloud.ru
```
-### Spectra Logic {#spectralogic}
-[Spectra Logic](https://www.spectralogic.com/blackpearl-nearline-object-gateway)
-is an on-prem S3-compatible object storage gateway that exposes local object storage and
-policy-tiers data to Spectra tape and public clouds under a single namespace for
-backup and archiving.
-The S3 compatible gateway is configured using `rclone config` with a
-type of `s3` and with a provider name of `SpectraLogic`. Here is an example
-run of the configurator.
-```
-No remotes found, make a new one?
-n) New remote
-s) Set configuration password
-q) Quit config
-n/s/q> n
-Enter name for new remote.
-name> spectralogic
-Option Storage.
-Type of storage to configure.
-Choose a number from below, or type in your own value.
-[snip]
-XX / Amazon S3 Compliant Storage Providers including ..., SpectraLogic, ...
-\ (s3)
-[snip]
-Storage> s3
-Option provider.
-Choose your S3 provider.
-Choose a number from below, or type in your own value.
-Press Enter to leave empty.
-[snip]
-XX / SpectraLogic BlackPearl
-\ (SpectraLogic)
-[snip]
-provider> SpectraLogic
-Option env_auth.
-Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
-Only applies if access_key_id and secret_access_key is blank.
-Choose a number from below, or type in your own boolean value (true or false).
-Press Enter for the default (false).
-1 / Enter AWS credentials in the next step.
-\ (false)
-2 / Get AWS credentials from the environment (env vars or IAM).
-\ (true)
-env_auth> 1
-Option access_key_id.
-AWS Access Key ID.
-Leave blank for anonymous access or runtime credentials.
-Enter a value. Press Enter to leave empty.
-access_key_id> ACCESS_KEY
-Option secret_access_key.
-AWS Secret Access Key (password).
-Leave blank for anonymous access or runtime credentials.
-Enter a value. Press Enter to leave empty.
-secret_access_key> SECRET_ACCESS_KEY
-Option endpoint.
-Endpoint for S3 API.
-Required when using an S3 clone.
-Enter a value. Press Enter to leave empty.
-endpoint> https://bps3.example.com
-Edit advanced config?
-y) Yes
-n) No (default)
-y/n> n
-Configuration complete.
-Options:
-- type: s3
-- provider: SpectraLogic
-- access_key_id: ACCESS_KEY
-- secret_access_key: SECRET_ACCESS_KEY
-- endpoint: https://bps3.example.com
-Keep this "spectratest" remote?
-y) Yes this is OK (default)
-e) Edit this remote
-d) Delete this remote
-y/e/d> y
-```
-And your config should end up looking like this:
-```
-[spectratest]
-type = s3
-provider = SpectraLogic
-access_key_id = ACCESS_KEY
-secret_access_key = SECRET_ACCESS_KEY
-endpoint = https://bps3.example.com
-```
### Storj
Storj is a decentralized cloud storage which can be used through its

View File

@@ -261,7 +261,7 @@ func Set(config configmap.Getter, opt any) (err error) {
}
// setIfSameType set aPtr with b if they are the same type or returns false.
-func setIfSameType(aPtr any, b any) bool {
+func setIfSameType(aPtr interface{}, b interface{}) bool {
aVal := reflect.ValueOf(aPtr).Elem()
bVal := reflect.ValueOf(b)

View File

@@ -70,7 +70,7 @@ func (gs *Groups) Include(groupsString string) *Groups {
return gs
}
want := map[string]bool{}
-for groupName := range strings.SplitSeq(groupsString, ",") {
+for _, groupName := range strings.Split(groupsString, ",") {
_, ok := All.ByName[groupName]
if !ok {
fs.Fatalf(nil, "Couldn't find group %q in command annotation", groupName)
@@ -173,7 +173,7 @@ func installFlag(flags *pflag.FlagSet, name string, groupsString string) {
// Add flag to Group if it is a global flag
if groupsString != "" && flags == pflag.CommandLine {
-for groupName := range strings.SplitSeq(groupsString, ",") {
+for _, groupName := range strings.Split(groupsString, ",") {
if groupName == "rc-" {
groupName = "RC"
}

View File

@@ -145,6 +145,7 @@ func rcProviders(ctx context.Context, in rc.Params) (out rc.Params, err error) {
func init() {
for _, name := range []string{"create", "update", "password"} {
+name := name
extraHelp := ""
if name == "create" {
extraHelp = "- type - type of the new remote\n"

View File

@@ -213,7 +213,7 @@ func BenchmarkCheckParents(b *testing.B) {
dt.Add(o)
}
b.StartTimer()
-for b.Loop() {
+for n := 0; n < b.N; n++ {
dt.CheckParents("")
}
})

View File

@@ -62,7 +62,7 @@ func TestListRHelperSend(t *testing.T) {
helper := NewHelper(callback)
// Add 100 entries to force the callback to be invoked
-for range 100 {
+for i := 0; i < 100; i++ {
require.NoError(t, helper.Add(entry))
}
@@ -120,7 +120,7 @@ var _ fs.ListPer = (*mockListPfs)(nil)
func TestListWithListP(t *testing.T) {
ctx := context.Background()
var entries fs.DirEntries
-for i := range 26 {
+for i := 0; i < 26; i++ {
entries = append(entries, mockobject.New(fmt.Sprintf("%c", 'A'+i)))
}
t.Run("NoError", func(t *testing.T) {

View File

@@ -222,6 +222,7 @@ func (lh *listHelper) send(max int) (err error) {
g, gCtx := errgroup.WithContext(lh.ls.ctx)
g.SetLimit(lh.ls.ci.Checkers)
for i, key := range lh.keys {
+i, key := i, key // can remove when go1.22 is minimum version
g.Go(func() error {
lh.entries[i], lh.errs[i] = lh.ls.keyToEntry(gCtx, key)
return nil

View File

@@ -144,7 +144,7 @@ func testSorterExt(t *testing.T, cutoff, N int, wantExtSort bool, keyFn KeyFn) {
// Make the directory entries
entriesMap := make(map[string]fs.DirEntry, N)
-for i := range N {
+for i := 0; i < N; i++ {
remote := fmt.Sprintf("%010d", i)
prefix := "a"
if i%3 == 0 {

View File

@@ -137,22 +137,20 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool, keyFn
)
return func(dir string, callback fs.ListRCallback) (err error) {
mu.Lock()
+defer mu.Unlock()
if !started {
dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List
dirs, dirsErr = walk.NewDirTree(dirCtx, f, m.Dir, includeAll, ci.MaxDepth)
started = true
}
if dirsErr != nil {
-mu.Unlock()
return dirsErr
}
entries, ok := dirs[dir]
if !ok {
-mu.Unlock()
return fs.ErrorDirNotFound
}
delete(dirs, dir)
-mu.Unlock()
// We use a stable sort here just in case there are
// duplicates. Assuming the remote delivers the entries in a

View File

@@ -32,7 +32,7 @@ func init() {
{"video/x-matroska", ".mpv,.mkv"},
{"application/x-subrip", ".srt"},
} {
-for ext := range strings.SplitSeq(t.extensions, ",") {
+for _, ext := range strings.Split(t.extensions, ",") {
if mime.TypeByExtension(ext) == "" {
err := mime.AddExtensionType(ext, t.mimeType)
if err != nil {

View File

@@ -160,6 +160,7 @@ func rcAbout(ctx context.Context, in rc.Params) (out rc.Params, err error) {
func init() {
for _, copy := range []bool{false, true} {
+copy := copy
name := "Move"
if copy {
name = "Copy"
@@ -208,7 +209,7 @@ func init() {
{name: "rmdir", title: "Remove an empty directory or container"},
{name: "purge", title: "Remove a directory or container and all of its contents"},
{name: "rmdirs", title: "Remove all the empty directories in the path", help: "- leaveRoot - boolean, set to true not to delete the root\n"},
{name: "delete", title: "Remove files in the path", help: "- rmdirs - boolean, set to true to remove empty directories\n- leaveRoot - boolean if rmdirs is set, set to true not to delete the root\n", noRemote: true},
{name: "delete", title: "Remove files in the path", noRemote: true},
{name: "deletefile", title: "Remove the single file pointed to"},
{name: "copyurl", title: "Copy the URL to the object", help: "- url - string, URL to read from\n - autoFilename - boolean, set to true to retrieve destination file name from url\n"},
{name: "uploadfile", title: "Upload file using multiform/form-data", help: "- each part in body represents a file to be uploaded\n", needsRequest: true, noCommand: true},
@@ -216,6 +217,7 @@ func init() {
{name: "settier", title: "Changes storage tier or class on all files in the path", noRemote: true},
{name: "settierfile", title: "Changes storage tier or class on the single file pointed to", noCommand: true},
} {
+op := op
var remote, command string
if !op.noRemote {
remote = "- remote - a path within that remote e.g. \"dir\"\n"
@@ -267,22 +269,7 @@ func rcSingleCommand(ctx context.Context, in rc.Params, name string, noRemote bo
}
return nil, Rmdirs(ctx, f, remote, leaveRoot)
case "delete":
-rmdirs, err := in.GetBool("rmdirs")
-if rc.NotErrParamNotFound(err) {
-return nil, err
-}
-leaveRoot, err := in.GetBool("leaveRoot")
-if rc.NotErrParamNotFound(err) {
-return nil, err
-}
-err = Delete(ctx, f)
-if err != nil {
-return nil, err
-}
-if !rmdirs {
-return nil, nil
-}
-return nil, Rmdirs(ctx, f, remote, leaveRoot)
+return nil, Delete(ctx, f)
case "deletefile":
o, err := f.NewObject(ctx, remote)
if err != nil {

View File

@@ -159,32 +159,21 @@ func TestRcCopyurl(t *testing.T) {
// operations/delete: Remove files in the path
func TestRcDelete(t *testing.T) {
-ctx := context.Background()
r, call := rcNewRun(t, "operations/delete")
file1 := r.WriteObject(ctx, "subdir/file1", "subdir/file1 contents", t1)
file2 := r.WriteObject(ctx, "file2", "file2 contents", t1)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1, file2}, []string{"subdir"}, fs.GetModifyWindow(ctx, r.Fremote))
file1 := r.WriteObject(context.Background(), "small", "1234567890", t2) // 10 bytes
file2 := r.WriteObject(context.Background(), "medium", "------------------------------------------------------------", t1) // 60 bytes
file3 := r.WriteObject(context.Background(), "large", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", t1) // 100 bytes
r.CheckRemoteItems(t, file1, file2, file3)
in := rc.Params{
"fs": r.FremoteName,
}
-out, err := call.Fn(ctx, in)
+out, err := call.Fn(context.Background(), in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{"subdir"}, fs.GetModifyWindow(ctx, r.Fremote))
// Now try with rmdirs=true and leaveRoot=true
in["rmdirs"] = true
in["leaveRoot"] = true
out, err = call.Fn(ctx, in)
require.NoError(t, err)
assert.Equal(t, rc.Params(nil), out)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.GetModifyWindow(ctx, r.Fremote))
// FIXME don't have an easy way of checking the root still exists or not
r.CheckRemoteItems(t)
}
// operations/deletefile: Remove the single file pointed to

View File

@@ -64,7 +64,7 @@ func filterBlocks(in Params, f func(oi fs.OptionsInfo)) (err error) {
return err
}
blocks := map[string]struct{}{}
-for name := range strings.SplitSeq(blocksStr, ",") {
+for _, name := range strings.Split(blocksStr, ",") {
if name != "" {
blocks[name] = struct{}{}
}

View File

@@ -206,7 +206,7 @@ func TestJobRunPanic(t *testing.T) {
runtime.Gosched() // yield to make sure job is updated
// Wait a short time for the panic to propagate
-for i := range uint(10) {
+for i := uint(0); i < 10; i++ {
job.mu.Lock()
e := job.Error
job.mu.Unlock()
@@ -539,7 +539,8 @@ func TestOnFinish(t *testing.T) {
func TestOnFinishAlreadyFinished(t *testing.T) {
jobID.Store(0)
done := make(chan struct{})
-ctx := t.Context()
+ctx, cancel := context.WithCancel(context.Background())
+defer cancel()
job, _, err := NewJob(ctx, shortFn, rc.Params{})
assert.NoError(t, err)
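`t.Context()` (Go 1.24) returns a per-test context that is cancelled automatically just before the test finishes; the replacement reinstates the explicit `context.WithCancel` plus `defer cancel()` needed on older toolchains. A minimal sketch of both forms (hypothetical test file):

```go
package example_test // hypothetical test file for illustration

import (
	"context"
	"testing"
)

func useContext(ctx context.Context) error { return ctx.Err() }

func TestWithContext(t *testing.T) {
	// Go 1.24+: t.Context() is cancelled automatically just before
	// the test finishes, so no manual cancel call is needed.
	if err := useContext(t.Context()); err != nil {
		t.Fatal(err)
	}

	// Pre-1.24 equivalent: derive and cancel explicitly.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if err := useContext(ctx); err != nil {
		t.Fatal(err)
	}
}
```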

View File

@@ -8,6 +8,7 @@ import (
func init() {
for _, name := range []string{"sync", "copy", "move"} {
+name := name
moveHelp := ""
if name == "move" {
moveHelp = "- deleteEmptySrcDirs - delete empty src directories if set\n"

View File

@@ -743,7 +743,7 @@ func parseTrackRenamesStrategy(strategies string) (strategy trackRenamesStrategy
if len(strategies) == 0 {
return strategy, nil
}
-for s := range strings.SplitSeq(strategies, ",") {
+for _, s := range strings.Split(strategies, ",") {
switch s {
case "hash":
strategy |= trackRenamesStrategyHash

View File

@@ -136,7 +136,7 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
items := []fstest.Item{}
for _, c := range alphabet {
var out strings.Builder
-for i := range rune(7) {
+for i := rune(0); i < 7; i++ {
out.WriteRune(c + i)
}
fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))

View File

@@ -65,9 +65,6 @@ func Initialise() {
if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
_ = config.SetConfigPath(envConfig)
}
if *RemoteName == "local" {
*RemoteName = ""
}
configfile.Install()
accounting.Start(ctx)
if *Verbose {

View File

@@ -157,7 +157,7 @@ func testsToRegexp(tests []string) string {
// Make a trie showing which parts are used at each level
for _, test := range tests {
parent := split
-for name := range strings.SplitSeq(test, "/") {
+for _, name := range strings.Split(test, "/") {
current := parent[name]
if current == nil {
current = trie{}

View File

@@ -82,7 +82,7 @@ func start(name string) error {
// parse the output and set environment vars from it
var connect string
var connectDelay time.Duration
-for line := range bytes.SplitSeq(out, []byte("\n")) {
+for _, line := range bytes.Split(out, []byte("\n")) {
line = bytes.TrimSpace(line)
part := matchLine.FindSubmatch(line)
if part != nil {

View File

@@ -184,8 +184,8 @@ func (mask MultiEncoder) String() string {
// Set converts a string into a MultiEncoder
func (mask *MultiEncoder) Set(in string) error {
var out MultiEncoder
-parts := strings.SplitSeq(in, ",")
-for part := range parts {
+parts := strings.Split(in, ",")
+for _, part := range parts {
part = strings.TrimSpace(part)
if bits, ok := nameToEncoding[part]; ok {
out |= bits

View File

@@ -31,7 +31,7 @@ func BenchmarkAllocFree(b *testing.B) {
for _, dirty := range []bool{false, true} {
for size := 4096; size <= 32*1024*1024; size *= 2 {
b.Run(fmt.Sprintf("%dk,dirty=%v", size>>10, dirty), func(b *testing.B) {
-for b.Loop() {
+for i := 0; i < b.N; i++ {
mem := MustAlloc(size)
if dirty {
mem[0] ^= 0xFF
@@ -62,7 +62,7 @@ func BenchmarkAllocFreeWithLotsOfAllocations(b *testing.B) {
for preAllocs := 1; preAllocs <= maxAllocs; preAllocs *= 2 {
allocs := alloc(preAllocs)
b.Run(fmt.Sprintf("%d", preAllocs), func(b *testing.B) {
-for b.Loop() {
+for i := 0; i < b.N; i++ {
mem := MustAlloc(size)
mem[0] ^= 0xFF
MustFree(mem)
@@ -90,7 +90,7 @@ func BenchmarkAllocFreeForLotsOfAllocations(b *testing.B) {
}
for preAllocs := 1; preAllocs <= maxAllocs; preAllocs *= 2 {
b.Run(fmt.Sprintf("%d", preAllocs), func(b *testing.B) {
-for b.Loop() {
+for i := 0; i < b.N; i++ {
allocs := alloc(preAllocs)
free(allocs)
}

View File

@@ -16,21 +16,23 @@ import (
// makes the allocations be unreliable
func makeUnreliable(bp *Pool) {
var allocCount int
-tests := rand.Intn(4) + 1
+const maxFailsInARow = 10
+var allocFails int
bp.alloc = func(size int) ([]byte, error) {
allocCount++
-if allocCount%tests != 0 {
+if rand.Intn(3) != 0 && allocFails < maxFailsInARow {
+allocFails++
return nil, errors.New("failed to allocate memory")
}
+allocFails = 0
return make([]byte, size), nil
}
var freeCount int
+var freeFails int
bp.free = func(b []byte) error {
freeCount++
-if freeCount%tests != 0 {
+if rand.Intn(3) != 0 && freeFails < maxFailsInARow {
+freeFails++
return errors.New("failed to free memory")
}
+freeFails = 0
return nil
}
}
@@ -288,24 +290,22 @@ func TestPoolMaxBufferMemory(t *testing.T) {
}
}
)
-const trials = 50
-for i := range trials {
+for i := 0; i < 20; i++ {
wg.Add(1)
go func() {
defer wg.Done()
-if i < trials/2 {
-n := i%4 + 1
-buf := bp.GetN(n)
-countBuf(n)
-time.Sleep(1 * time.Millisecond)
-countBuf(-n)
+if i < 4 {
+buf := bp.GetN(i + 1)
+countBuf(i + 1)
+time.Sleep(100 * time.Millisecond)
bp.PutN(buf)
+countBuf(-(i + 1))
} else {
buf := bp.Get()
countBuf(1)
-time.Sleep(1 * time.Millisecond)
-countBuf(-1)
+time.Sleep(100 * time.Millisecond)
bp.Put(buf)
+countBuf(-1)
}
}()
}

View File

@@ -212,7 +212,7 @@ type dirMap map[string]struct{}
// Create a dirMap from a string
func newDirMap(dirString string) (dm dirMap) {
dm = make(dirMap)
-for entry := range strings.SplitSeq(dirString, "|") {
+for _, entry := range strings.Split(dirString, "|") {
if entry != "" {
dm[entry] = struct{}{}
}