Mirror of https://github.com/rclone/rclone.git, synced 2026-01-27 14:53:20 +00:00

Compare commits: jotta-auth...v1.71-stab (51 commits)
a2b292b31d, 4013f006b8, 371fc61191, 0d9a95125c, 386d3c5b4d, 3dceb72e9b, 30b016d8c0,
b8e6d45482, dfdc69a3e1, 0908c346bb, f186b7554e, 4bbab927fa, 1c119f9330, 089c4b1a69,
b5ba737e14, 15f727f0e3, 01e1f90216, 997c73b586, ccd01f1a68, 63c0dc773c, f1e6bffc14,
1a98c36a73, 931ab7c4db, 2b531ada34, 45f45c987c, d2351e60b6, f8de6b48f1, 616a280aac,
e1833f4090, 135d89d0f9, ea54bddbd5, baf6167930, 1d91618d9e, 4a7e62b79c, dcca477f39,
7304ecaf18, c3932ecde1, c9df7b1cd7, 3985496e5d, 0d2ef2eb20, 836e19243d, 0a6cce1bc1,
cffb6732a4, 236f247c59, 3b07f9d34d, bad77c642f, 41eef6608b, fc6bd9ff79, ee83cd214c,
2c2642a927, 32eed8dd36
.github/workflows/build.yml (vendored, 6 changes)
```diff
@@ -100,7 +100,7 @@ jobs:
           fetch-depth: 0

       - name: Install Go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v5
        with:
           go-version: ${{ matrix.go }}
           check-latest: true
@@ -222,7 +222,7 @@ jobs:

       - name: Install Go
         id: setup-go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v5
        with:
           go-version: '>=1.24.0-rc.1'
           check-latest: true
@@ -311,7 +311,7 @@ jobs:

       # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v6
+        uses: actions/setup-go@v5
        with:
           go-version: '>=1.25.0-rc.1'
```
```diff
@@ -92,7 +92,7 @@ jobs:
         # There's no way around this, because "ImageOS" is only available to
         # processes, but the setup-go action uses it in its key.
         id: imageos
-        uses: actions/github-script@v8
+        uses: actions/github-script@v7
        with:
           result-encoding: string
           script: |
```
```diff
@@ -628,7 +628,7 @@ You'll need to modify the following files
 - `backend/s3/s3.go`
   - Add the provider to `providerOption` at the top of the file
   - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from generic config questions (eg `region` and `endpoint).
+  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
   - Add the provider to the `setQuirks` function - see the documentation there.
 - `docs/content/s3.md`
   - Add the provider at the top of the page.
```
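For orientation, provider choices in the s3 backend are plain `fs.OptionExample` values (the same type used by the jottacloud hunks further down this diff). A hypothetical sketch of what a new entry might look like; the provider name and help text are invented, and the exact shape of `providerOption` should be checked against `backend/s3/s3.go`:

```go
// Hypothetical sketch only: registering a new provider choice.
// fs.OptionExample is rclone's real type for enumerated option values;
// "MyProvider" and its help string are invented for illustration.
providerOption.Examples = append(providerOption.Examples, fs.OptionExample{
	Value: "MyProvider",               // value stored in the config file
	Help:  "MyProvider Object Storage", // shown during `rclone config`
})
```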
MANUAL.html (generated, 8410 changes): file diff suppressed because it is too large.

MANUAL.txt (generated, 2248 changes): file diff suppressed because it is too large.
Makefile (14 changes)
```diff
@@ -100,7 +100,6 @@ compiletest:
 check: rclone
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
 	@golangci-lint run $(LINTTAGS) ./...
-	@bin/markdown-lint
 	@echo "-- END CODE QUALITY REPORT ---------------------------------"

 # Get the build dependencies
@@ -114,21 +113,21 @@ release_dep_linux:
 # Update dependencies
 showupdates:
 	@echo "*** Direct dependencies that could be updated ***"
-	@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
+	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

 # Update direct dependencies only
 updatedirect:
-	go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
-	go mod tidy
+	GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	GO111MODULE=on go mod tidy

 # Update direct and indirect dependencies and test dependencies
 update:
-	go get -u -t ./...
-	go mod tidy
+	GO111MODULE=on go get -d -u -t ./...
+	GO111MODULE=on go mod tidy

 # Tidy the module dependencies
 tidy:
-	go mod tidy
+	GO111MODULE=on go mod tidy

 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

@@ -149,7 +148,6 @@ commanddocs: rclone
 	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
 	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
-	go run bin/make_bisync_docs.go ./docs/content/

 backenddocs: rclone bin/make_backend_docs.py
 	-@rmdir -p '$$HOME/.config/rclone'
```
```diff
@@ -59,7 +59,6 @@ directories to and from different cloud storage providers.
 - Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 - Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 - IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
-- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
 - IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 - Koofr [:page_facing_up:](https://rclone.org/koofr/)
 - Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
@@ -106,7 +105,6 @@ directories to and from different cloud storage providers.
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
-- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
 - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 - Storj [:page_facing_up:](https://rclone.org/storj/)
 - SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
```
```diff
@@ -1338,9 +1338,9 @@ func (f *Fs) containerOK(container string) bool {
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
+func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
 	if !f.containerOK(containerName) {
-		return fs.ErrorDirNotFound
+		return nil, fs.ErrorDirNotFound
 	}
 	err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1348,16 +1348,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
 			return err
 		}
 		if entry != nil {
-			return callback(entry)
+			entries = append(entries, entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	// container must be present if listing succeeded
 	f.cache.MarkOK(containerName)
-	return nil
+	return entries, nil
 }

 // listContainers returns all the containers to out
@@ -1393,47 +1393,14 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	list := list.NewHelper(callback)
 	container, directory := f.split(dir)
 	if container == "" {
 		if directory != "" {
-			return fs.ErrorListBucketRequired
+			return nil, fs.ErrorListBucketRequired
 		}
-		entries, err := f.listContainers(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
-		if err != nil {
-			return err
-		}
+		return f.listContainers(ctx)
 	}
-	return list.Flush()
+	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
 }

 // ListR lists the objects and directories of the Fs starting
@@ -2152,6 +2119,7 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
 	}
 	metadata = make(map[string]*string, len(o.meta))
 	for k, v := range o.meta {
+		v := v
 		metadata[k] = &v
 	}
 	return metadata
@@ -3186,7 +3154,6 @@ var (
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.Purger          = &Fs{}
 	_ fs.ListRer         = &Fs{}
-	_ fs.ListPer         = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
 	_ fs.Object          = &Object{}
 	_ fs.MimeTyper       = &Object{}
```
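The same change repeats across the bucket-based backends in this comparison (azureblob above, then b2, gcs, memory, oracleobjectstorage, swift): the paginated `ListP` entry point is dropped and `List` goes back to returning a slice. For reference, a condensed sketch of the pattern being removed, built only from names that appear verbatim in the hunks (`list.WithListP`, `list.NewHelper`, `fs.ListRCallback`); `walkDir` is a hypothetical stand-in for each backend's real lister, so treat this as an outline rather than the exact rclone implementation:

```go
// Condensed sketch of the ListP pattern removed in this comparison.
// List delegates to ListP; list.WithListP gathers the streamed
// tranches back into a single fs.DirEntries slice.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries to the callback instead of accumulating them,
// which bounds memory on very large directories.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback)
	err := f.walkDir(ctx, dir, helper.Add) // walkDir: hypothetical per-backend lister
	if err != nil {
		return err
	}
	return helper.Flush() // deliver any buffered entries
}
```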
```diff
@@ -847,7 +847,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
 	last := ""
 	err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
@@ -855,16 +855,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 			return err
 		}
 		if entry != nil {
-			return callback(entry)
+			entries = append(entries, entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return nil
+	return entries, nil
 }

 // listBuckets returns all the buckets to out
@@ -890,46 +890,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return fs.ErrorListBucketRequired
-		}
-		entries, err := f.listBuckets(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
-		if err != nil {
-			return err
+			return nil, fs.ErrorListBucketRequired
 		}
+		return f.listBuckets(ctx)
 	}
-	return list.Flush()
+	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
 }

 // ListR lists the objects and directories of the Fs starting
@@ -2464,7 +2432,6 @@ var (
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.CleanUpper      = &Fs{}
 	_ fs.ListRer         = &Fs{}
-	_ fs.ListPer         = &Fs{}
 	_ fs.PublicLinker    = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
 	_ fs.Commander       = &Fs{}
```
backend/cache/cache.go (vendored, 2 changes)
```diff
@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 		start, end int64
 	}
 	parseChunks := func(ranges string) (crs []chunkRange, err error) {
-		for part := range strings.SplitSeq(ranges, ",") {
+		for _, part := range strings.Split(ranges, ",") {
 			var start, end int64 = 0, math.MaxInt64
 			switch ints := strings.Split(part, ":"); len(ints) {
 			case 1:
```
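Several hunks in this comparison swap `strings.SplitSeq` (a Go 1.24 iterator that yields substrings without allocating a slice) back to the older `strings.Split`. A minimal self-contained illustration of the two forms:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	ranges := "0:9,10:19,20:"

	// Go 1.24+: SplitSeq returns an iterator (iter.Seq[string]),
	// so no intermediate []string is allocated.
	for part := range strings.SplitSeq(ranges, ",") {
		fmt.Println(part)
	}

	// Pre-1.24 equivalent: Split allocates the whole slice up front.
	for _, part := range strings.Split(ranges, ",") {
		fmt.Println(part)
	}
}
```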
```diff
@@ -187,6 +187,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
 	g, gCtx := errgroup.WithContext(ctx)
 	var mu sync.Mutex
 	for _, upstream := range opt.Upstreams {
+		upstream := upstream
 		g.Go(func() (err error) {
 			equal := strings.IndexRune(upstream, '=')
 			if equal < 0 {
@@ -369,6 +370,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
 func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
 	g, gCtx := errgroup.WithContext(ctx)
 	for _, u := range f.upstreams {
+		u := u
 		g.Go(func() (err error) {
 			return fn(gCtx, u)
 		})
@@ -635,6 +637,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 	var uChans []chan time.Duration

 	for _, u := range f.upstreams {
+		u := u
 		if do := u.f.Features().ChangeNotify; do != nil {
 			ch := make(chan time.Duration)
 			uChans = append(uChans, ch)
```
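The added `u := u` / `upstream := upstream` lines re-introduce the classic per-iteration copy. Since Go 1.22 the loop variable is scoped per iteration, so newer code can omit the copy; code that must also build with Go 1.21 and earlier needs it back. A small runnable sketch of the hazard the copy prevents on old compilers:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, s := range []string{"a", "b", "c"} {
		s := s // per-iteration copy; redundant on Go 1.22+, required before
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Without the copy, on Go <= 1.21 every goroutine shared one
			// variable and could all print "c"; with it, each goroutine
			// captures its own iteration's value on every Go version.
			fmt.Println(s)
		}()
	}
	wg.Wait()
}
```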
```diff
@@ -598,7 +598,7 @@ It doesn't return anything.
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
 	switch name {
 	case "metadata":
 		return f.ShowMetadata(ctx)
@@ -625,7 +625,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 }

 // ShowMetadata returns some metadata about the corresponding DOI
-func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
+func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
 	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
 	if err != nil {
 		return nil, err
```
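`any` has been a predeclared alias for `interface{}` since Go 1.18, so these swaps are purely cosmetic and the two spellings have the identical type:

```go
package main

import "fmt"

// Since Go 1.18: type any = interface{} (an alias, not a new type).
func showModern(v any)          { fmt.Println(v) }
func showClassic(v interface{}) { fmt.Println(v) }

func main() {
	showModern(42)
	showClassic("same runtime behaviour")
}
```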
```diff
@@ -18,7 +18,7 @@ type headerLink struct {
 }

 func parseLinkHeader(header string) (links []headerLink) {
-	for link := range strings.SplitSeq(header, ",") {
+	for _, link := range strings.Split(header, ",") {
 		link = strings.TrimSpace(link)
 		parsed := parseLink(link)
 		if parsed != nil {
@@ -30,7 +30,7 @@ func parseLinkHeader(header string) (links []headerLink) {

 func parseLink(link string) (parsedLink *headerLink) {
 	var parts []string
-	for part := range strings.SplitSeq(link, ";") {
+	for _, part := range strings.Split(link, ";") {
 		parts = append(parts, strings.TrimSpace(part))
 	}
```
```diff
@@ -191,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
 	if scopesString == "" {
 		scopesString = defaultScope
 	}
-	for scope := range strings.SplitSeq(scopesString, ",") {
+	for _, scope := range strings.Split(scopesString, ",") {
 		scope = strings.TrimSpace(scope)
 		scopes = append(scopes, scopePrefix+scope)
 	}
@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
 // into a list of unique extensions with leading "." and a list of associated MIME types
 func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
 	for _, extensionText := range extensionsIn {
-		for extension := range strings.SplitSeq(extensionText, ",") {
+		for _, extension := range strings.Split(extensionText, ",") {
 			extension = strings.ToLower(strings.TrimSpace(extension))
 			if extension == "" {
 				continue
```
|
||||
g.SetLimit(o.fs.ci.Checkers)
|
||||
var mu sync.Mutex // protect the info.Permissions from concurrent writes
|
||||
for _, permissionID := range info.PermissionIds {
|
||||
permissionID := permissionID
|
||||
g.Go(func() error {
|
||||
// must fetch the team drive ones individually to check the inherited flag
|
||||
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
|
||||
@@ -519,6 +520,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
|
||||
}
|
||||
// merge metadata into request and user metadata
|
||||
for k, v := range meta {
|
||||
k, v := k, v
|
||||
// parse a boolean from v and write into out
|
||||
parseBool := func(out *bool) error {
|
||||
b, err := strconv.ParseBool(v)
|
||||
|
||||
```diff
@@ -8,7 +8,7 @@ type CreateFolderResponse struct {
 	Status int    `json:"status"`
 	Msg    string `json:"msg"`
 	Result struct {
-		FldID any `json:"fld_id"`
+		FldID interface{} `json:"fld_id"`
 	} `json:"result"`
 }
```
```diff
@@ -252,6 +252,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 		}, {
 			Value: "us-east4",
 			Help:  "Northern Virginia",
+		}, {
+			Value: "us-east5",
+			Help:  "Ohio",
 		}, {
 			Value: "us-west1",
 			Help:  "Oregon",
```
```diff
@@ -760,7 +763,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
 	// List the objects
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -768,16 +771,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 			return err
 		}
 		if entry != nil {
-			return callback(entry)
+			entries = append(entries, entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return err
+	return entries, err
 }

 // listBuckets lists the buckets
@@ -820,46 +823,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return fs.ErrorListBucketRequired
-		}
-		entries, err := f.listBuckets(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
-		if err != nil {
-			return err
+			return nil, fs.ErrorListBucketRequired
 		}
+		return f.listBuckets(ctx)
 	}
-	return list.Flush()
+	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
 }

 // ListR lists the objects and directories of the Fs starting
@@ -1494,7 +1465,6 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
-	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
```
```diff
@@ -17,7 +17,6 @@ import (
 	"net/url"
 	"os"
 	"path"
-	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -60,41 +59,31 @@ const (
 	configVersion = 1

 	defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
-	defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices"
+	defaultClientID = "jottacli"

 	legacyTokenURL              = "https://api.jottacloud.com/auth/v1/token"
 	legacyRegisterURL           = "https://api.jottacloud.com/auth/v1/register"
 	legacyClientID              = "nibfk8biu12ju7hpqomr8b1e40"
 	legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
 	legacyConfigVersion         = 0
+
+	teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
+	teliaseCloudAuthURL  = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
+	teliaseCloudClientID = "desktop"
+
+	telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
+	telianoCloudAuthURL  = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
+	telianoCloudClientID = "desktop"
+
+	tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
+	tele2CloudAuthURL  = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
+	tele2CloudClientID = "desktop"
+
+	onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
+	onlimeCloudAuthURL  = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
+	onlimeCloudClientID = "desktop"
 )
-
-type service struct {
-	key      string
-	name     string
-	domain   string
-	realm    string
-	clientID string
-	scopes   []string
-}
-
-func getServices() []service {
-	return []service{
-		{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
-		{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"mediamarkt", "MediaMarkt Cloud", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
-	}
-}

 // Register with Fs
 func init() {
 	// needs to be done early so we can use oauth during config
@@ -170,42 +159,36 @@ func init() {
 }

 // Config runs the backend configuration protocol
-func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
-	switch conf.State {
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+	switch config.State {
 	case "":
 		if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" {
 			return nil, errors.New("not supported by this backend")
 		}
-		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
+		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
 			Value: "standard",
-			Help: `Standard authentication.
-This is primarily supported by the official service, but may also be supported
-by some of the white-label services. It is designed for command-line
-applications, and you will be asked to enter a single-use personal login token
-which you must manually generate from the account security settings in the
-web interface of your service.`,
-		}, {
-			Value: "traditional",
-			Help: `Traditional authentication.
-This is supported by the official service and most of the white-label
-services, you will be asked which service to connect to. You need to be on
-a machine with an internet-connected web browser.`,
+			Help:  "Standard authentication.\nUse this if you're a normal Jottacloud user.",
 		}, {
 			Value: "legacy",
-			Help: `Legacy authentication.
-This is no longer supported by any known services and not recommended used.
-You will be asked for your account's username and password.`,
+			Help:  "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
+		}, {
+			Value: "telia_se",
+			Help:  "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
+		}, {
+			Value: "telia_no",
+			Help:  "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
+		}, {
+			Value: "tele2",
+			Help:  "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
+		}, {
+			Value: "onlime",
+			Help:  "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
 		}})
 	case "auth_type_done":
 		// Jump to next state according to config chosen
-		return fs.ConfigGoto(conf.Result)
+		return fs.ConfigGoto(config.Result)
 	case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
 		m.Set("configVersion", fmt.Sprint(configVersion))
-		return fs.ConfigInput("standard_token", "config_login_token", `Personal login token.
-Generate it from the account security settings in the web interface of your
-service, for the official service on https://www.jottacloud.com/web/secure.`)
+		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
 	case "standard_token":
-		loginToken := conf.Result
+		loginToken := config.Result
 		m.Set(configClientID, defaultClientID)
 		m.Set(configClientSecret, "")
@@ -220,50 +203,10 @@ service, for the official service on https://www.jottacloud.com/web/secure.`)
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
-	case "traditional":
-		services := getServices()
-		options := make([]fs.OptionExample, 0, len(services))
-		for _, service := range services {
-			options = append(options, fs.OptionExample{
-				Value: service.key,
-				Help:  service.name,
-			})
-		}
-		return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional",
-			"White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.",
-			options)
-	case "traditional_type":
-		services := getServices()
-		i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result })
-		if i == -1 {
-			return nil, fmt.Errorf("unexpected service %q", conf.Result)
-		}
-		service := services[i]
-		opts := rest.Opts{
-			Method:  "GET",
-			RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration",
-		}
-		var wellKnown api.WellKnown
-		srv := rest.NewClient(fshttp.NewClient(ctx))
-		_, err := srv.CallJSON(ctx, &opts, nil, &wellKnown)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err)
-		}
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, service.clientID)
-		m.Set(configTokenURL, wellKnown.TokenEndpoint)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     wellKnown.AuthorizationEndpoint,
-				TokenURL:    wellKnown.TokenEndpoint,
-				ClientID:    service.clientID,
-				Scopes:      service.scopes,
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
 	case "legacy": // configure a jottacloud backend using legacy authentication
 		m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
 		return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?

Rclone has it's own Jottacloud API KEY which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine it's recommended to create a
@@ -271,7 +214,7 @@ machine specific API key. These keys can NOT be shared between
 machines.`)
 	case "legacy_api":
 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		if conf.Result == "true" {
+		if config.Result == "true" {
 			deviceRegistration, err := registerDevice(ctx, srv)
 			if err != nil {
 				return nil, fmt.Errorf("failed to register device: %w", err)
@@ -280,16 +223,16 @@ machines.`)
 			m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
 			fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
 		}
-		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.")
+		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
 	case "legacy_username":
-		m.Set(configUsername, conf.Result)
-		return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. This is only used in setup, it will not be stored.")
+		m.Set(configUsername, config.Result)
+		return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
 	case "legacy_password":
-		m.Set("password", conf.Result)
+		m.Set("password", config.Result)
 		m.Set("auth_code", "")
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_auth_code":
-		authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
+		authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
 		m.Set("auth_code", authCode)
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_do_auth":
@@ -299,12 +242,12 @@ machines.`)
 		authCode, _ := m.Get("auth_code")

 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		clientID, _ := m.Get(configClientID)
-		if clientID == "" {
+		clientID, ok := m.Get(configClientID)
+		if !ok {
 			clientID = legacyClientID
 		}
-		clientSecret, _ := m.Get(configClientSecret)
-		if clientSecret == "" {
+		clientSecret, ok := m.Get(configClientSecret)
+		if !ok {
 			clientSecret = legacyEncryptedClientSecret
 		}
@@ -317,7 +260,7 @@ machines.`)
 		}
 		token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
 		if err == errAuthCodeRequired {
-			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
+			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
 		}
 		m.Set("password", "")
 		m.Set("auth_code", "")
@@ -329,6 +272,58 @@ machines.`)
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
+	case "telia_se": // telia_se cloud config
+		m.Set("configVersion", fmt.Sprint(configVersion))
+		m.Set(configClientID, teliaseCloudClientID)
+		m.Set(configTokenURL, teliaseCloudTokenURL)
+		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+			OAuth2Config: &oauthutil.Config{
+				AuthURL:     teliaseCloudAuthURL,
+				TokenURL:    teliaseCloudTokenURL,
+				ClientID:    teliaseCloudClientID,
+				Scopes:      []string{"openid", "jotta-default", "offline_access"},
+				RedirectURL: oauthutil.RedirectLocalhostURL,
+			},
+		})
+	case "telia_no": // telia_no cloud config
+		m.Set("configVersion", fmt.Sprint(configVersion))
+		m.Set(configClientID, telianoCloudClientID)
+		m.Set(configTokenURL, telianoCloudTokenURL)
+		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+			OAuth2Config: &oauthutil.Config{
+				AuthURL:     telianoCloudAuthURL,
+				TokenURL:    telianoCloudTokenURL,
+				ClientID:    telianoCloudClientID,
+				Scopes:      []string{"openid", "jotta-default", "offline_access"},
+				RedirectURL: oauthutil.RedirectLocalhostURL,
+			},
+		})
+	case "tele2": // tele2 cloud config
+		m.Set("configVersion", fmt.Sprint(configVersion))
+		m.Set(configClientID, tele2CloudClientID)
+		m.Set(configTokenURL, tele2CloudTokenURL)
+		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+			OAuth2Config: &oauthutil.Config{
+				AuthURL:     tele2CloudAuthURL,
+				TokenURL:    tele2CloudTokenURL,
+				ClientID:    tele2CloudClientID,
+				Scopes:      []string{"openid", "jotta-default", "offline_access"},
+				RedirectURL: oauthutil.RedirectLocalhostURL,
+			},
+		})
+	case "onlime": // onlime cloud config
+		m.Set("configVersion", fmt.Sprint(configVersion))
+		m.Set(configClientID, onlimeCloudClientID)
+		m.Set(configTokenURL, onlimeCloudTokenURL)
+		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+			OAuth2Config: &oauthutil.Config{
+				AuthURL:     onlimeCloudAuthURL,
+				TokenURL:    onlimeCloudTokenURL,
+				ClientID:    onlimeCloudClientID,
+				Scopes:      []string{"openid", "jotta-default", "offline_access"},
+				RedirectURL: oauthutil.RedirectLocalhostURL,
+			},
+		})
 	case "choose_device":
 		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
 Choosing no, the default, will let you access the storage used for the archive
@@ -336,7 +331,7 @@ section of the official Jottacloud client. If you instead want to access the
 sync or the backup section, for example, you must choose yes.`)

 	case "choose_device_query":
-		if conf.Result != "true" {
+		if config.Result != "true" {
 			m.Set(configDevice, "")
 			m.Set(configMountpoint, "")
 			return fs.ConfigGoto("end")
@@ -377,7 +372,7 @@ a new by entering a unique name.`, defaultDevice)
 			return deviceNames[i], ""
 		})
 	case "choose_device_result":
-		device := conf.Result
+		device := config.Result

 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
@@ -437,7 +432,7 @@ You may create a new by entering a unique name.`, device)
 			return dev.MountPoints[i].Name, ""
 		})
 	case "choose_device_mountpoint":
-		mountpoint := conf.Result
+		mountpoint := config.Result

 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
@@ -468,7 +463,7 @@ You may create a new by entering a unique name.`, device)

 		if isNew {
 			if device == defaultDevice {
-				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice)
+				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
 			}
 			fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
 			_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
@@ -483,7 +478,7 @@ You may create a new by entering a unique name.`, device)
 		// All the config flows end up here in case we need to carry on with something
 		return nil, nil
 	}
-	return nil, fmt.Errorf("unknown state %q", conf.State)
+	return nil, fmt.Errorf("unknown state %q", config.State)
 }

 // Options defines the configuration for this backend
@@ -934,12 +929,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 			oauthConfig.AuthURL = tokenURL
 		}
 	} else if ver == legacyConfigVersion {
-		clientID, _ := m.Get(configClientID)
-		if clientID == "" {
+		clientID, ok := m.Get(configClientID)
+		if !ok {
 			clientID = legacyClientID
 		}
-		clientSecret, _ := m.Get(configClientSecret)
-		if clientSecret == "" {
+		clientSecret, ok := m.Get(configClientSecret)
+		if !ok {
 			clientSecret = legacyEncryptedClientSecret
 		}
 		oauthConfig.ClientID = clientID
```
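For readers unfamiliar with the shape of the code above: rclone backend config flows are small state machines. Each `case` either asks a question (`fs.ConfigInput`, `fs.ConfigChooseExclusiveFixed`, `fs.ConfigConfirm`, all visible in the hunks) tagged with the state that will receive the answer, or jumps with `fs.ConfigGoto`. A stripped-down sketch of that shape; the state names and prompts here are invented, only the helper functions are real:

```go
import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// Illustrative only: a minimal config state machine in the style of the
// jottacloud backend above. States and prompts are invented.
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
	switch conf.State {
	case "":
		// First question; the answer arrives in the "auth_done" state.
		return fs.ConfigInput("auth_done", "config_token", "Paste your token.")
	case "auth_done":
		m.Set("token", conf.Result) // persist the answer
		return fs.ConfigGoto("end") // jump to a terminal state
	case "end":
		return nil, nil // nil, nil ends the flow
	}
	return nil, fmt.Errorf("unknown state %q", conf.State)
}
```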
```diff
@@ -334,7 +334,7 @@ func TestMetadata(t *testing.T) {

 func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 	ctx := context.Background()
-	whenRFC := when.Local().Format(time.RFC3339Nano)
+	whenRFC := when.Format(time.RFC3339Nano)
 	const dayLength = len("2001-01-01")

 	f := r.Flocal.(*Fs)
```
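The `when.Local()` difference matters because RFC 3339 output carries the zone: converting to the local zone first makes the string match what the local filesystem reports for mtimes, while formatting the UTC value directly yields a `Z` suffix. A tiny illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	when := time.Date(2001, 1, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(when.Format(time.RFC3339Nano))         // "2001-01-01T12:00:00Z" (UTC, Z suffix)
	fmt.Println(when.Local().Format(time.RFC3339Nano)) // same instant, local zone offset
}
```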
```diff
@@ -400,7 +400,7 @@ type quirks struct {
 }

 func (q *quirks) parseQuirks(option string) {
-	for flag := range strings.SplitSeq(option, ",") {
+	for _, flag := range strings.Split(option, ",") {
 		switch strings.ToLower(strings.TrimSpace(flag)) {
 		case "binlist":
 			// The official client sometimes uses a so called "bin" protocol,
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	f.speedupAny = false
 	uniqueValidPatterns := make(map[string]any)

-	for pattern := range strings.SplitSeq(patternString, ",") {
+	for _, pattern := range strings.Split(patternString, ",") {
 		pattern = strings.ToLower(strings.TrimSpace(pattern))
 		if pattern == "" {
 			continue
```
```diff
@@ -325,12 +325,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 }

 // listDir lists the bucket to the entries
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
 	// List the objects and directories
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
-		return callback(entry)
+		entries = append(entries, entry)
+		return nil
 	})
-	return err
+	return entries, err
 }

 // listBuckets lists the buckets to entries
@@ -353,46 +354,15 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	list := list.NewHelper(callback)
 	// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return fs.ErrorListBucketRequired
-		}
-		entries, err := f.listBuckets(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
-		if err != nil {
-			return err
+			return nil, fs.ErrorListBucketRequired
 		}
+		return f.listBuckets(ctx)
 	}
-	return list.Flush()
+	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
 }

 // ListR lists the objects and directories of the Fs starting
@@ -659,7 +629,6 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
-	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
```
```diff
@@ -243,6 +243,7 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
 func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
 	numSet = 0
 	for k, v := range metadata {
+		k, v := k, v
 		switch k {
 		case "mtime":
 			t, err := time.Parse(timeFormatIn, v)
@@ -421,7 +422,12 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
 		if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
 			return true
 		}
-		return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
+		for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
+			if hasUserIdentity(identity) {
+				return true
+			}
+		}
+		return false
 	}
 	// Put Permissions with a user first, leaving unsorted otherwise
 	slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
```
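`slices.ContainsFunc` (standard library since Go 1.21) is exactly the hand-rolled loop that replaces it here. A minimal equivalence check:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	xs := []int{1, 3, 5, 8}
	isEven := func(n int) bool { return n%2 == 0 }

	// Go 1.21+ one-liner.
	fmt.Println(slices.ContainsFunc(xs, isEven)) // true

	// Hand-rolled equivalent, as written back in the hunk above.
	found := false
	for _, x := range xs {
		if isEven(x) {
			found = true
			break
		}
	}
	fmt.Println(found) // true
}
```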
```diff
@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
 	require.NoError(b, err)
 	require.Equal(b, len(buf), n)
 	h := New()

-	for b.Loop() {
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
 		h.Reset()
 		h.Write(buf)
 		h.Sum(nil)
```
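`testing.B.Loop` arrived in Go 1.24; it resets the timer automatically, which is why the stable branch needs the explicit `b.ResetTimer()` plus `b.N` loop instead. A sketch of both styles (place in a `_test.go` file):

```go
package bench

import "testing"

func BenchmarkModern(b *testing.B) {
	for b.Loop() { // Go 1.24+: timing of setup above is excluded automatically
		work()
	}
}

func BenchmarkClassic(b *testing.B) {
	b.ResetTimer() // manually exclude setup from timing
	for i := 0; i < b.N; i++ {
		work()
	}
}

func work() {}
```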
```diff
@@ -254,47 +254,15 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	list := list.NewHelper(callback)
 	bucketName, directory := f.split(dir)
 	fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
 	if bucketName == "" {
 		if directory != "" {
-			return fs.ErrorListBucketRequired
-		}
-		entries, err := f.listBuckets(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
-		if err != nil {
-			return err
+			return nil, fs.ErrorListBucketRequired
 		}
+		return f.listBuckets(ctx)
 	}
-	return list.Flush()
+	return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
 }

 // listFn is called from list to handle an object.
@@ -443,24 +411,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectst
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
 	fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 		if err != nil {
 			return err
 		}
 		if entry != nil {
-			return callback(entry)
+			entries = append(entries, entry)
 		}
 		return nil
 	}
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return nil
+	return entries, nil
 }

 // listBuckets returns all the buckets to out
@@ -797,7 +765,6 @@ var (
 	_ fs.Copier          = &Fs{}
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.ListRer         = &Fs{}
-	_ fs.ListPer         = &Fs{}
 	_ fs.Commander       = &Fs{}
 	_ fs.CleanUpper      = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
```
```diff
@@ -59,7 +59,11 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f

 	defer func() { u.fileUsage[fileID] = borrowed }()

-	effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))
+	effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
+
+	if neededMemory < effectiveChunkSize {
+		effectiveChunkSize = neededMemory
+	}

 	if effectiveChunkSize <= u.reserved {
 		return effectiveChunkSize
```
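Here the outer `min(...)` builtin (added with `max` in Go 1.21) is unrolled into an explicit comparison while the inner `max` stays. The two spellings are equivalent:

```go
package main

import "fmt"

func main() {
	needed, computed := int64(100), int64(250)

	// Builtin form.
	a := min(needed, computed)

	// Explicit expansion, as in the hunk above.
	b := computed
	if needed < b {
		b = needed
	}

	fmt.Println(a, b) // 100 100
}
```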
backend/s3/s3.go (909 changes): file diff suppressed because it is too large.
```diff
@@ -200,7 +200,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 	pool := newFilePool(ctx, fs, "testshare", "/test/path")

 	const numGoroutines = 10
-	for range numGoroutines {
+	for i := 0; i < numGoroutines; i++ {
 		mockFile := newMockFile()
 		pool.pool = append(pool.pool, mockFile)
 	}
@@ -208,7 +208,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 	// Test concurrent get operations
 	done := make(chan bool, numGoroutines)

-	for range numGoroutines {
+	for i := 0; i < numGoroutines; i++ {
 		go func() {
 			defer func() { done <- true }()
@@ -219,7 +219,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 		}()
 	}

-	for range numGoroutines {
+	for i := 0; i < numGoroutines; i++ {
 		<-done
 	}
```
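`for range` over an integer is a Go 1.22 addition; when the index is unused it is the shorter spelling, and the stable branch swaps back to the classic three-clause form. Both run the body exactly n times:

```go
package main

import "fmt"

func main() {
	const n = 3

	for range n { // Go 1.22+: range over an int
		fmt.Println("modern")
	}

	for i := 0; i < n; i++ { // works on every Go version
		fmt.Println("classic")
	}
}
```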
```diff
@@ -192,9 +192,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}

-	// if root is empty or ends with / (must be a directory)
-	isRootDir := isPathDir(root)
-
 	root = strings.Trim(root, "/")

 	f := &Fs{
@@ -221,11 +218,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if share == "" || dir == "" {
 		return f, nil
 	}
-
-	// Skip stat check if root is already a directory
-	if isRootDir {
-		return f, nil
-	}
 	cn, err := f.getConnection(ctx, share)
 	if err != nil {
 		return nil, err
@@ -902,11 +894,6 @@ func ensureSuffix(s, suffix string) string {
 	return s + suffix
 }

-// isPathDir determines if a path represents a directory based on trailing slash
-func isPathDir(path string) bool {
-	return path == "" || strings.HasSuffix(path, "/")
-}
-
 func trimPathPrefix(s, prefix string) string {
 	// we need to clean the paths to make tests pass!
 	s = betterPathClean(s)
```
```diff
@@ -1,41 +0,0 @@
-// Unit tests for internal SMB functions
-package smb
-
-import "testing"
-
-// TestIsPathDir tests the isPathDir function logic
-func TestIsPathDir(t *testing.T) {
-	tests := []struct {
-		path     string
-		expected bool
-	}{
-		// Empty path should be considered a directory
-		{"", true},
-
-		// Paths with trailing slash should be directories
-		{"/", true},
-		{"share/", true},
-		{"share/dir/", true},
-		{"share/dir/subdir/", true},
-
-		// Paths without trailing slash should not be directories
-		{"share", false},
-		{"share/dir", false},
-		{"share/dir/file", false},
-		{"share/dir/subdir/file", false},
-
-		// Edge cases
-		{"share//", true},
-		{"share///", true},
-		{"share/dir//", true},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.path, func(t *testing.T) {
-			result := isPathDir(tt.path)
-			if result != tt.expected {
-				t.Errorf("isPathDir(%q) = %v, want %v", tt.path, result, tt.expected)
-			}
-		})
-	}
-}
```
```diff
@@ -773,20 +773,21 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
+func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
 	if container == "" {
-		return fs.ErrorListBucketRequired
+		return nil, fs.ErrorListBucketRequired
 	}
 	// List the objects
 	err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
-		return callback(entry)
+		entries = append(entries, entry)
+		return nil
 	})
 	if err != nil {
-		return err
+		return nil, err
 	}
 	// container must be present if listing succeeded
 	f.cache.MarkOK(container)
-	return nil
+	return entries, nil
 }

 // listContainers lists the containers
@@ -817,46 +818,14 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	return list.WithListP(ctx, dir, f)
-}
-
-// ListP lists the objects and directories of the Fs starting
-// from dir non recursively into out.
-//
-// dir should be "" to start from the root, and should not
-// have trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-//
-// It should call callback for each tranche of entries read.
-// These need not be returned in any particular order. If
-// callback returns an error then the listing will stop
-// immediately.
-func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
-	list := list.NewHelper(callback)
 	container, directory := f.split(dir)
 	if container == "" {
 		if directory != "" {
-			return fs.ErrorListBucketRequired
-		}
-		entries, err := f.listContainers(ctx)
-		if err != nil {
-			return err
-		}
-		for _, entry := range entries {
-			err = list.Add(entry)
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
-		if err != nil {
-			return err
+			return nil, fs.ErrorListBucketRequired
 		}
+		return f.listContainers(ctx)
 	}
-	return list.Flush()
+	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
 }

 // ListR lists the objects and directories of the Fs starting
@@ -1681,7 +1650,6 @@ var (
 	_ fs.PutStreamer = &Fs{}
 	_ fs.Copier      = &Fs{}
 	_ fs.ListRer     = &Fs{}
-	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
```
@@ -21,6 +21,7 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
ctx, cancel := context.WithCancel(ctx)
defer cancel()
for _, u := range upstreams {
u := u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)

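The `u := u // Closure` line restored above is the pre-Go-1.22 guard against goroutines sharing one loop variable, which matters if the stable branch is built with an older toolchain. A small self-contained sketch of the failure mode it prevents:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, it := range items {
		it := it // pre-Go-1.22: copy per iteration so each goroutine gets its own value
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(it) // without the copy, old toolchains could print "c" three times
		}()
	}
	wg.Wait()
}
```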
@@ -123,7 +123,7 @@ func (p *Prop) Hashes() (hashes map[hash.Type]string) {
hashes = make(map[hash.Type]string)
for _, checksums := range p.Checksums {
checksums = strings.ToLower(checksums)
for checksum := range strings.SplitSeq(checksums, " ") {
for _, checksum := range strings.Split(checksums, " ") {
switch {
case strings.HasPrefix(checksum, "sha1:"):
hashes[hash.SHA1] = checksum[5:]

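This hunk, like several below, swaps Go 1.24's iterator-based `strings.SplitSeq` back to `strings.Split`, presumably so the stable branch still builds with older toolchains. The two forms are interchangeable for this loop; a minimal sketch of the difference:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "sha1:abc md5:def"

	// strings.Split allocates a slice of all substrings up front.
	for _, field := range strings.Split(s, " ") {
		fmt.Println(field)
	}

	// strings.SplitSeq (Go 1.24+) yields the same substrings lazily,
	// without building the intermediate slice.
	for field := range strings.SplitSeq(s, " ") {
		fmt.Println(field)
	}
}
```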
@@ -1,159 +0,0 @@
//go:build ignore

package main

import (
"bytes"
"cmp"
"context"
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"slices"
"strings"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/runs"
"github.com/stretchr/testify/assert/yaml"
)

var path = flag.String("path", "./docs/content/", "root path")

const (
configFile = "fstest/test_all/config.yaml"
startListIgnores = "<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListIgnores = "<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
startListFailures = "<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListFailures = "<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
integrationTestsJSONURL = "https://pub.rclone.org/integration-tests/current/index.json"
integrationTestsHTMLURL = "https://pub.rclone.org/integration-tests/current/"
)

func main() {
err := replaceBetween(*path, startListIgnores, endListIgnores, getIgnores)
if err != nil {
fs.Errorf(*path, "error replacing ignores: %v", err)
}
err = replaceBetween(*path, startListFailures, endListFailures, getFailures)
if err != nil {
fs.Errorf(*path, "error replacing failures: %v", err)
}
}

// replaceBetween replaces the text between startSep and endSep with fn()
func replaceBetween(path, startSep, endSep string, fn func() (string, error)) error {
b, err := os.ReadFile(filepath.Join(path, "bisync.md"))
if err != nil {
return err
}
doc := string(b)

before, after, found := strings.Cut(doc, startSep)
if !found {
return fmt.Errorf("could not find: %v", startSep)
}
_, after, found = strings.Cut(after, endSep)
if !found {
return fmt.Errorf("could not find: %v", endSep)
}

replaceSection, err := fn()
if err != nil {
return err
}

newDoc := before + startSep + "\n" + strings.TrimSpace(replaceSection) + "\n" + endSep + after

err = os.WriteFile(filepath.Join(path, "bisync.md"), []byte(newDoc), 0777)
if err != nil {
return err
}
return nil
}

// getIgnores updates the list of ignores from config.yaml
func getIgnores() (string, error) {
config, err := parseConfig()
if err != nil {
return "", fmt.Errorf("failed to parse config: %v", err)
}
s := ""
slices.SortFunc(config.Backends, func(a, b runs.Backend) int {
return cmp.Compare(a.Remote, b.Remote)
})
for _, backend := range config.Backends {
include := false

if slices.Contains(backend.IgnoreTests, "cmd/bisync") {
include = true
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}

for _, ignore := range backend.Ignore {
if strings.Contains(strings.ToLower(ignore), "bisync") {
if !include { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
include = true
s += fmt.Sprintf(" - `%s`\n", ignore)
// TODO: might be neat to add a "reason" param displaying the reason the test is ignored
}
}
}
return s, nil
}

// getFailures updates the list of currently failing tests from the integration tests server
func getFailures() (string, error) {
var buf bytes.Buffer
err := operations.CopyURLToWriter(context.Background(), integrationTestsJSONURL, &buf)
if err != nil {
return "", err
}

r := runs.Report{}
err = json.Unmarshal(buf.Bytes(), &r)
if err != nil {
return "", fmt.Errorf("failed to unmarshal json: %v", err)
}

s := ""
for _, run := range r.Failed {
for i, t := range run.FailedTests {
if strings.Contains(strings.ToLower(t), "bisync") {

if i == 0 { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(run.Remote, ":"), run.Backend)
}

url := integrationTestsHTMLURL + run.TrialName
url = url[:len(url)-5] + "1.txt" // numbers higher than 1 could change from night to night
s += fmt.Sprintf(" - [`%s`](%v)\n", t, url)

if i == 4 && len(run.FailedTests) > 5 { // stop after 5
s += fmt.Sprintf(" - [%v more](%v)\n", len(run.FailedTests)-5, integrationTestsHTMLURL)
break
}
}
}
}
s += fmt.Sprintf("- Updated: %v", r.DateTime)
return s, nil
}

// parseConfig reads and parses the config.yaml file
func parseConfig() (*runs.Config, error) {
d, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("failed to read config file: %w", err)
}
config := &runs.Config{}
err = yaml.Unmarshal(d, &config)
if err != nil {
return nil, fmt.Errorf("failed to parse config file: %w", err)
}
return config, nil
}
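For reference, the generator deleted above rewrote marker-delimited sections of `docs/content/bisync.md` in place. Based on the `fmt.Sprintf` formats in `getIgnores`, its output between the markers looked roughly like this (remote and test names here are illustrative, not from a real run):

```markdown
<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->
- `TestSomeRemote` (`somebackend`)
  - `TestBisyncRemoteLocal/compare_all`
<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->
```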
@@ -1,17 +0,0 @@
#!/usr/bin/env bash
#
# Run markdown linting locally
set -e

# Workflow
build=.github/workflows/build.yml

# Globs read from $build
globs=$(awk '/- name: Check Markdown format/{f=1;next} f && /globs:/{f=2;next} f==2 && NF{if($1=="-"){exit} print $0}' $build)

if [ -z "$globs" ]; then
echo "Error: No globs found in Check Markdown step in $build" >&2
exit 1
fi

docker run -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs
@@ -33,7 +33,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
}
logMap = map[string]string{}
logs = []string{}
for line := range bytes.SplitSeq(out, []byte{'\n'}) {
for _, line := range bytes.Split(out, []byte{'\n'}) {
if len(line) == 0 {
continue
}

@@ -177,7 +177,6 @@ var (
// "src and dst identical but can't set mod time without deleting and re-uploading"
argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)")
ignoreLogs = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings")
argPCount = flag.Int("pcount", 2, "number of parallel subtests to run for TestBisyncConcurrent") // go test ./cmd/bisync -race -pcount 10
)

// bisyncTest keeps all test data in a single place
@@ -285,15 +284,6 @@ func TestBisyncConcurrent(t *testing.T) {
if !isLocal(*fstest.RemoteName) {
t.Skip("TestBisyncConcurrent is skipped on non-local")
}
if *argTestCase != "" && *argTestCase != "basic" {
t.Skip("TestBisyncConcurrent only tests 'basic'")
}
if *argPCount < 2 {
t.Skip("TestBisyncConcurrent is pointless with -pcount < 2")
}
if *argGolden {
t.Skip("skip TestBisyncConcurrent when goldenizing")
}
oldArgTestCase := argTestCase
*argTestCase = "basic"
*ignoreLogs = true // not useful to compare logs here because both runs will be logging at once
@@ -302,9 +292,8 @@ func TestBisyncConcurrent(t *testing.T) {
*ignoreLogs = false
})

for i := 0; i < *argPCount; i++ {
t.Run(fmt.Sprintf("test%v", i), testParallel)
}
t.Run("test1", testParallel)
t.Run("test2", testParallel)
}

func testParallel(t *testing.T) {
@@ -476,7 +465,6 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str

// Prepare initial content
b.cleanupCase(ctx)
ctx = accounting.WithStatsGroup(ctx, random.String(8))
fstest.CheckListingWithPrecision(b.t, b.fs1, []fstest.Item{}, []string{}, b.fs1.Precision()) // verify starting from empty
fstest.CheckListingWithPrecision(b.t, b.fs2, []fstest.Item{}, []string{}, b.fs2.Precision())
initFs, err := cache.Get(ctx, b.initDir)
@@ -522,7 +510,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
require.NoError(b.t, err)
b.step = 0
b.stopped = false
for line := range strings.SplitSeq(string(scenBuf), "\n") {
for _, line := range strings.Split(string(scenBuf), "\n") {
comment := strings.Index(line, "#")
if comment != -1 {
line = line[:comment]
@@ -653,11 +641,12 @@ func (b *bisyncTest) cleanupCase(ctx context.Context) {
_ = operations.Purge(ctx, b.fs1, "")
_ = operations.Purge(ctx, b.fs2, "")
_ = os.RemoveAll(b.workDir)
accounting.Stats(ctx).ResetCounters()
}

func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
var fsrc, fdst fs.Fs
ctx = accounting.WithStatsGroup(ctx, random.String(8))
accounting.Stats(ctx).ResetErrors()
b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line))

ci := fs.GetConfig(ctx)
@@ -936,7 +925,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
// splitLine splits scenario line into tokens and performs
// substitutions that involve whitespace or control chars.
func splitLine(line string) (args []string) {
for s := range strings.FieldsSeq(line) {
for _, s := range strings.Fields(line) {
b := []byte(whitespaceReplacer.Replace(s))
b = regexChar.ReplaceAllFunc(b, func(b []byte) []byte {
c, _ := strconv.ParseUint(string(b[5:7]), 16, 8)
@@ -1018,7 +1007,6 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
}
// test if modtimes are writeable
testSetModtime := func(f fs.Fs) {
ctx := accounting.WithStatsGroup(ctx, random.String(8)) // keep stats separate
in := bytes.NewBufferString("modtime_write_test")
objinfo := object.NewStaticObjectInfo("modtime_write_test", initDate, int64(len("modtime_write_test")), true, nil, nil)
obj, err := f.Put(ctx, in, objinfo)
@@ -1030,11 +1018,6 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
if err == fs.ErrorCantSetModTime {
b.t.Skip("skipping test as at least one remote does not support setting modtime")
}
if err == fs.ErrorCantSetModTimeWithoutDelete { // transfers stats expected to differ on this backend
logReplacements = append(logReplacements, `^.*There was nothing to transfer.*$`, dropMe)
} else {
require.NoError(b.t, err)
}
if !f.Features().IsLocal {
time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
}
@@ -1513,7 +1496,7 @@ func (b *bisyncTest) compareResults() int {

fs.Log(nil, divider)
fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
for line := range strings.SplitSeq(strings.TrimSpace(text), "\n") {
for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
fs.Logf(nil, "| %s", strings.TrimSpace(line))
}
}
@@ -1636,14 +1619,6 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
`^.*not equal on recheck.*$`, dropMe,
)
}
if b.ignoreBlankHash || !b.fs1.Hashes().Contains(hash.MD5) || !b.fs2.Hashes().Contains(hash.MD5) {
// if either side lacks support for md5, need to ignore the "nothing to transfer" log,
// as sync may in fact need to transfer, where it would otherwise skip based on hash or just update modtime.
// transfer stats will also differ in fs.ErrorCantSetModTimeWithoutDelete scenario, and where --download-hash is needed.
logReplacements = append(logReplacements,
`^.*There was nothing to transfer.*$`, dropMe,
)
}
rep := logReplacements
if b.testCase == "dry_run" {
rep = append(rep, dryrunReplacements...)

@@ -219,8 +219,8 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
return nil
}
var CompareFlag CompareOpt // for exclusions
opts := strings.SplitSeq(b.opt.CompareFlag, ",")
for opt := range opts {
opts := strings.Split(b.opt.CompareFlag, ",")
for _, opt := range opts {
switch strings.ToLower(strings.TrimSpace(opt)) {
case "size":
b.opt.Compare.Size = true

@@ -707,7 +708,8 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
prettyprint(dstList.list, "dstList", fs.LogLevelDebug)

// clear stats so we only do this once
accounting.Stats(ctx).RemoveDoneTransfers()
accounting.MaxCompletedTransfers = 0
accounting.Stats(ctx).PruneTransfers()
}

if b.DebugName != "" {

@@ -245,8 +245,10 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
}
}

b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
accounting.Stats(ctxCopy).SetMaxCompletedTransfers(-1) // we need a complete list in the event of graceful shutdown
b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
if accounting.MaxCompletedTransfers != -1 {
accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
}
ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
b.testFn()
err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -61,7 +59,6 @@ INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -136,7 +133,6 @@ INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -89,7 +87,6 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -21,9 +21,7 @@ INFO : Using filters file {workdir/}exclude-other-filtersfile.txt
INFO : Storing filters file hash to {workdir/}exclude-other-filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -138,9 +136,7 @@ INFO : Using filters file {workdir/}include-other-filtersfile.txt
INFO : Storing filters file hash to {workdir/}include-other-filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -92,9 +90,7 @@ INFO : Copying Path2 files to Path1
INFO : Checking access health
INFO : Found 2 matching ".chk_file" files on both paths
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -104,9 +102,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -15,9 +15,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -15,9 +15,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -23,9 +23,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -82,7 +80,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : subdir: Making directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -126,7 +124,6 @@ INFO : Path2: 1 changes: [32m 0 new[0m, [33m 0 modified[0m, [31m
INFO : Applying changes
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}RCLONE_TEST[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -151,9 +148,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -193,7 +188,6 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}subdir[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : subdir: Removing directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -27,9 +27,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}測試Русский ěáñ/" with Path2 "{path2/}測試Русский ěáñ/"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}測試Русский ěáñ/" vs Path2 "{path2/}測試Русский ěáñ/"
INFO : [32mBisync successful[0m
@@ -86,9 +84,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -178,9 +174,7 @@ INFO : Using filters file {workdir/}測試_filtersfile.txt
INFO : Storing filters file hash to {workdir/}測試_filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -20,9 +20,7 @@ INFO : Using filters file {workdir/}filtersfile.flt
INFO : Storing filters file hash to {workdir/}filtersfile.flt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -83,9 +81,7 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Storing filters file hash to {workdir/}filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -150,9 +146,7 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Skipped storing filters file hash to {workdir/}filtersfile.txt.{hashtype} as --dry-run is set
INFO : Copying Path2 files to Path1
NOTICE: - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
NOTICE: - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : [32mBisync successful[0m


@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -35,9 +33,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -86,7 +84,6 @@ INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}file4.txt[0m
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}file5.txt[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -86,7 +84,6 @@ INFO : - [36mPath1[0m [35m[31mQueue delete[0m[0m - [36m{
INFO : - [36mPath1[0m [35m[31mQueue delete[0m[0m - [36m{path1/}file4.txt[0m
INFO : - [36mPath1[0m [35m[31mQueue delete[0m[0m - [36m{path1/}file5.txt[0m
INFO : - [34mPath2[0m [35mDo queued copies to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -15,9 +15,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -17,9 +17,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -117,9 +115,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -158,7 +154,6 @@ INFO : Applying changes
INFO : - [34mPath2[0m [35m[32mQueue copy to[0m Path1[0m - [36m{path1/}file2.txt[0m
INFO : - [34mPath2[0m [35m[32mQueue copy to[0m Path1[0m - [36m{path1/}subdir/file21.txt[0m
INFO : - [34mPath2[0m [35mDo queued copies to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -176,7 +171,6 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -39,7 +39,6 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -22,7 +22,6 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
@@ -130,7 +129,6 @@ INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : file1.txt: Path1 is smaller. Path1: 33, Path2: 42, Difference: 9
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : file1.txt: Path1 is smaller. Path1: 33, Path2: 42, Difference: 9
INFO : Resync updating listings
@@ -160,7 +158,6 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,9 +16,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -47,7 +45,6 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}subdir/file20.txt[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -15,9 +15,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -37,7 +37,6 @@ func init() {
configCommand.AddCommand(configDisconnectCommand)
configCommand.AddCommand(configUserInfoCommand)
configCommand.AddCommand(configEncryptionCommand)
configCommand.AddCommand(configStringCommand)
}

var configCommand = &cobra.Command{
@@ -614,55 +613,3 @@ If the config file is not encrypted it will return a non zero exit code.`, "|",
return nil
},
}

var configStringCommand = &cobra.Command{
Use: "string <remote>",
Short: `Print connection string for a single remote.`,
Long: strings.ReplaceAll(`Print a connection string for a single remote.

The [connection strings](/docs/#connection-strings) can be used
wherever a remote is needed and can be more convenient than using the
config file, especially if using the RC API.

Backend parameters may be provided to the command also.

Example:

|||sh
$ rclone config string s3:rclone --s3-no-check-bucket
:s3,access_key_id=XXX,no_check_bucket,provider=AWS,region=eu-west-2,secret_access_key=YYY:rclone
|||

**NB** the strings are not quoted for use in shells (eg bash,
powershell, windows cmd). Most will work if enclosed in "double
quotes", however connection strings that contain double quotes will
require further quoting which is very shell dependent.

`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.72",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
remote := args[0]
fsInfo, _, fsPath, m, err := fs.ConfigFs(remote)
if err != nil {
return err
}

// Find the overridden options and construct the string
overridden := fsInfo.Options.NonDefault(m)
var out strings.Builder
out.WriteRune(':')
out.WriteString(fsInfo.Name)
config := overridden.Human()
if config != "" {
out.WriteRune(',')
out.WriteString(config)
}
out.WriteRune(':')
out.WriteString(fsPath)
fmt.Println(out.String())
return nil
},
}

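Since connection strings are accepted anywhere a remote name is, one possible follow-on use of the removed command's output is to feed it straight back into another invocation (the exact string below is illustrative):

```sh
# Print the connection string for a configured remote...
rclone config string s3:rclone
# ...then use the printed string directly in place of the remote name
rclone lsf ':s3,provider=AWS,region=eu-west-2:rclone'
```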
@@ -152,7 +152,7 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
items := []fstest.Item{}
for _, c := range alphabet {
var out strings.Builder
for i := range rune(7) {
for i := rune(0); i < 7; i++ {
out.WriteRune(c + i)
}
fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))

@@ -3,7 +3,6 @@ package copyurl

import (
"context"
"encoding/csv"
"errors"
"fmt"
"os"
@@ -13,9 +12,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/errcount"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)

var (
@@ -24,7 +21,6 @@ var (
printFilename = false
stdout = false
noClobber = false
urls = false
)

func init() {
@@ -35,7 +31,6 @@ func init() {
flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename", "")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name", "")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file", "")
flags.BoolVarP(cmdFlags, &urls, "urls", "", stdout, "Use a CSV file of links to process multiple URLs", "")
}

var commandDefinition = &cobra.Command{
@@ -59,17 +54,6 @@ destination if there is one with the same name.
Setting |--stdout| or making the output file name |-|
will cause the output to be written to standard output.

Setting |--urls| allows you to input a CSV file of URLs in format: URL,
FILENAME. If |--urls| is in use then replace the URL in the arguments with the
file containing the URLs, e.g.:
|||sh
rclone copyurl --urls myurls.csv remote:dir
|||
Missing filenames will be autogenerated equivalent to using |--auto-filename|.
Note that |--stdout| and |--print-filename| are incompatible with |--urls|.
This will do |--transfers| copies in parallel. Note that if |--auto-filename|
is desired for all URLs then a file with only URLs and no filename can be used.

### Troubleshooting

If you can't get |rclone copyurl| to work then here are some things you can try:
@@ -86,93 +70,32 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
RunE: func(command *cobra.Command, args []string) (err error) {
cmd.CheckArgs(1, 2, command, args)

cmd.Run(true, true, command, func() error {
if !urls {
return run(args)
var dstFileName string
var fsdst fs.Fs
if !stdout {
if len(args) < 2 {
return errors.New("need 2 arguments if not using --stdout")
}
return runURLS(args)
if args[1] == "-" {
stdout = true
} else if autoFilename {
fsdst = cmd.NewFsDir(args[1:])
} else {
fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
}
}
cmd.Run(true, true, command, func() error {
var dst fs.Object
if stdout {
err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
} else {
dst, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber)
if printFilename && err == nil && dst != nil {
fmt.Println(dst.Remote())
}
}
return err
})
return nil
},
}

var copyURL = operations.CopyURL // for testing

// runURLS processes a .csv file of urls and filenames
func runURLS(args []string) (err error) {
if stdout {
return errors.New("can't use --stdout with --urls")
}
if printFilename {
return errors.New("can't use --print-filename with --urls")
}
dstFs := cmd.NewFsDir(args[1:])

f, err := os.Open(args[0])
if err != nil {
return fmt.Errorf("failed to open .csv file: %w", err)
}
defer fs.CheckClose(f, &err)
reader := csv.NewReader(f)
reader.FieldsPerRecord = -1
urlList, err := reader.ReadAll()
if err != nil {
return fmt.Errorf("failed reading .csv file: %w", err)
}

ec := errcount.New()
g, gCtx := errgroup.WithContext(context.Background())
ci := fs.GetConfig(gCtx)
g.SetLimit(ci.Transfers)

for _, urlEntry := range urlList {
if len(urlEntry) == 0 {
continue
}
g.Go(func() error {
url := urlEntry[0]
var filename string
if len(urlEntry) > 1 {
filename = urlEntry[1]
}
_, err := copyURL(gCtx, dstFs, filename, url, filename == "", headerFilename, noClobber)
if err != nil {
fs.Errorf(filename, "failed to copy URL %q: %v", url, err)
ec.Add(err)
}
return nil
})
}
ec.Add(g.Wait())
return ec.Err("not all URLs copied successfully")
}

// run runs the command for a single URL
func run(args []string) error {
var err error
var dstFileName string
var fsdst fs.Fs
if !stdout {
if len(args) < 2 {
return errors.New("need 2 arguments if not using --stdout")
}
if args[1] == "-" {
stdout = true
} else if autoFilename {
fsdst = cmd.NewFsDir(args[1:])
} else {
fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
}
}

var dst fs.Object
if stdout {
err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
} else {
dst, err = copyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber)
if printFilename && err == nil && dst != nil {
fmt.Println(dst.Remote())
}
}
return err
}

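To make the removed `--urls` CSV format concrete: one URL per row, with an optional second column for the destination filename; rows without it fall back to auto-naming. A minimal illustrative input (hypothetical URLs):

```sh
cat > myurls.csv <<'EOF'
https://example.com/files/report.pdf,report.pdf
https://example.com/files/data.bin
EOF
rclone copyurl --urls myurls.csv remote:dir
```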
@@ -1,157 +0,0 @@
package copyurl

import (
"context"
"errors"
"os"
"path/filepath"
"sync"
"sync/atomic"
"testing"

_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func resetGlobals() {
autoFilename = false
headerFilename = false
printFilename = false
stdout = false
noClobber = false
urls = false
copyURL = operations.CopyURL
}

func TestRun_RequiresTwoArgsWhenNotStdout(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()

err := run([]string{"https://example.com/foo"})
require.Error(t, err)
assert.Contains(t, err.Error(), "need 2 arguments if not using --stdout")
}

func TestRun_CallsCopyURL_WithExplicitFilename_Success(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()

tmp := t.TempDir()
dstPath := filepath.Join(tmp, "out.txt")

var called int32

copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
atomic.AddInt32(&called, 1)
assert.Equal(t, "https://example.com/file", url)
assert.Equal(t, "out.txt", dstFileName)
assert.False(t, auto)
assert.False(t, header)
assert.False(t, noclobber)
return nil, nil
}

err := run([]string{"https://example.com/file", dstPath})
require.NoError(t, err)
assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}

func TestRun_CallsCopyURL_WithAutoFilename_AndPropagatesError(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()

tmp := t.TempDir()
autoFilename = true

want := errors.New("boom")
var called int32

copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
atomic.AddInt32(&called, 1)
assert.Equal(t, "", dstFileName) // auto filename -> empty
assert.True(t, auto)
return nil, want
}

err := run([]string{"https://example.com/auto/name", tmp})
require.Error(t, err)
assert.Equal(t, want, err)
assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}

func TestRunURLS_ErrorsWithStdoutAndWithPrintFilename(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()

stdout = true
err := runURLS([]string{"dummy.csv", "destDir"})
require.Error(t, err)
assert.Contains(t, err.Error(), "can't use --stdout with --urls")

resetGlobals()
printFilename = true
err = runURLS([]string{"dummy.csv", "destDir"})
require.Error(t, err)
assert.Contains(t, err.Error(), "can't use --print-filename with --urls")
}

func TestRunURLS_ProcessesCSV_ParallelCalls_AndAggregatesError(t *testing.T) {
t.Cleanup(resetGlobals)
resetGlobals()

tmp := t.TempDir()
csvPath := filepath.Join(tmp, "urls.csv")
csvContent := []byte(
"https://example.com/a,aaa.txt\n" + // success
"https://example.com/b\n" + // auto filename
"https://example.com/c,ccc.txt\n") // error
require.NoError(t, os.WriteFile(csvPath, csvContent, 0o600))

// destination dir (local backend)
dest := t.TempDir()

// mock copyURL: succeed for /a and /b, fail for /c

var calls int32
var mu sync.Mutex
var seen []string

copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
atomic.AddInt32(&calls, 1)
mu.Lock()
seen = append(seen, url+"|"+dstFileName)
mu.Unlock()

switch {
case url == "https://example.com/a":
require.Equal(t, "aaa.txt", dstFileName)
return nil, nil
case url == "https://example.com/b":
require.Equal(t, "", dstFileName) // auto-name path
return nil, nil
case url == "https://example.com/c":
return nil, errors.New("network down")
default:
return nil, nil
}
}

err := runURLS([]string{csvPath, dest})
require.Error(t, err)
assert.Contains(t, err.Error(), "not all URLs copied successfully")
// 3 lines => 3 calls
assert.Equal(t, int32(3), atomic.LoadInt32(&calls))

// sanity: all expected URLs were seen
assert.ElementsMatch(t,
[]string{
"https://example.com/a|aaa.txt",
"https://example.com/b|",
"https://example.com/c|ccc.txt",
},
seen,
)
}
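The deleted tests above depend on the `var copyURL = operations.CopyURL // for testing` seam in the command file: a package-level function variable that production code calls and tests reassign to a stub. A minimal self-contained sketch of the pattern (names hypothetical):

```go
package seam

// fetch is the seam: production code always calls through this
// variable, and tests temporarily replace it with a stub.
var fetch = realFetch

func realFetch(url string) (string, error) {
	return "fetched:" + url, nil // stands in for real I/O
}

// Get is the code under test; it never calls realFetch directly.
func Get(url string) (string, error) {
	return fetch(url)
}
```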
@@ -8,7 +8,6 @@ import (
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"text/template"
"time"
@@ -92,7 +91,7 @@ rclone.org website.`,
Aliases []string
Annotations map[string]string
}
commands := map[string]commandDetails{}
var commands = map[string]commandDetails{}
var addCommandDetails func(root *cobra.Command, parentAliases []string)
addCommandDetails = func(root *cobra.Command, parentAliases []string) {
name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
@@ -159,7 +158,7 @@ rclone.org website.`,
return err
}

outdentTitle := regexp.MustCompile(`(?m)^#(#+)`)
var outdentTitle = regexp.MustCompile(`(?m)^#(#+)`)

// Munge the files to add a link to the global flags page
err = filepath.Walk(out, func(path string, info os.FileInfo, err error) error {
@@ -170,20 +169,6 @@ rclone.org website.`,
name := filepath.Base(path)
cmd, ok := commands[name]
if !ok {
switch name {
case "rclone_mount.md":
switch runtime.GOOS {
case "darwin", "windows":
fs.Logf(nil, "Skipping docs for command not available without the cmount build tag: %v", name)
return nil
}
case "rclone_nfsmount.md", "rclone_serve_nfs.md":
switch runtime.GOOS {
case "windows":
fs.Logf(nil, "Skipping docs for command not supported on %v: %v", runtime.GOOS, name)
return nil
}
}
return fmt.Errorf("didn't find command for %q", name)
}
b, err := os.ReadFile(path)

@@ -229,6 +229,7 @@ func TestEndToEnd(t *testing.T) {
|
||||
skipE2eTestIfNecessary(t)
|
||||
|
||||
for _, mode := range allLayoutModes() {
|
||||
mode := mode
|
||||
t.Run(string(mode), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -257,6 +258,7 @@ func TestEndToEndMigration(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, mode := range allLayoutModes() {
|
||||
mode := mode
|
||||
t.Run(string(mode), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
@@ -316,6 +318,7 @@ func TestEndToEndRepoLayoutCompat(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, mode := range allLayoutModes() {
|
||||
mode := mode
|
||||
t.Run(string(mode), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
||||
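The `mode := mode` lines added in these hunks pin the loop variable before it is captured by the parallel subtest closure. Before Go 1.22 every iteration shared one variable, so without the copy all goroutines could observe the final value of `mode`. A small illustrative sketch (not rclone code):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, mode := range []string{"a", "b", "c"} {
		mode := mode // pre-Go 1.22: copy so each goroutine keeps its own value
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(mode) // prints a, b, c in some order
		}()
	}
	wg.Wait()
}
```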
@@ -344,7 +344,7 @@ func showBackend(name string) {
	}
	for _, ex := range opt.Examples {
		fmt.Printf(" - %s\n", quoteString(ex.Value))
		for line := range strings.SplitSeq(ex.Help, "\n") {
		for _, line := range strings.Split(ex.Help, "\n") {
			fmt.Printf(" - %s\n", line)
		}
	}

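This hunk, like several below, appears to swap `strings.SplitSeq` (the Go 1.24 iterator form) for `strings.Split`, presumably so the stable branch builds with older toolchains. The two loops visit the same parts; a minimal sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "left,right,top"
	// Go 1.24+: iterator form, no intermediate slice allocated.
	for part := range strings.SplitSeq(s, ",") {
		fmt.Println(part)
	}
	// Pre-1.24 equivalent used on this branch: allocate the slice.
	for _, part := range strings.Split(s, ",") {
		fmt.Println(part)
	}
}
```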
@@ -33,7 +33,7 @@ func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details", "")
	flags.StringVarP(cmdFlags, &timeFormat, "time-format", "t", "", "Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05)", "")
	flags.StringVarP(cmdFlags, &timeFormat, "time-format", "t", "", "Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)", "")
	flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format", "")
	flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names", "")
	flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash", "")
@@ -169,8 +169,6 @@ rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z
rclone lsf remote:path --format pt --time-format RFC3339
rclone lsf remote:path --format pt --time-format DateOnly
rclone lsf remote:path --format pt --time-format max
rclone lsf remote:path --format pt --time-format unix
rclone lsf remote:path --format pt --time-format unixnano
` + "```" + `

` + "`--time-format max`" + ` will automatically truncate ` + "`2006-01-02 15:04:05.000000000`" + `

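Apart from the rclone-specific `max`, `unix` and `unixnano` values, the layouts accepted by `--time-format` are standard Go reference-time layouts. A short reminder of how such layouts render, under the assumption that lsf hands them to `time.Time.Format`:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2025, 10, 20, 12, 34, 56, 789000000, time.UTC)
	// Go layouts are spelled as renderings of the reference time
	// "Mon Jan 2 15:04:05 MST 2006".
	fmt.Println(ts.Format("2006-01-02 15:04:05"))           // default lsf layout
	fmt.Println(ts.Format("2006-01-02 15:04:05.000000000")) // nanosecond precision
	fmt.Println(ts.Format(time.RFC3339))                    // named layout
}
```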
@@ -41,10 +41,9 @@ var OptionsInfo = fs.Options{}.

// Options required for http server
type Options struct {
	Auth       libhttp.AuthConfig
	HTTP       libhttp.Config
	Template   libhttp.TemplateConfig
	DisableZip bool
	Auth     libhttp.AuthConfig
	HTTP     libhttp.Config
	Template libhttp.TemplateConfig
}

// DefaultOpt is the default values used for Options
@@ -70,7 +69,6 @@ func init() {
	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
	vfsflags.AddFlags(flagSet)
	proxyflags.AddFlags(flagSet)
	flagSet.BoolVar(&Opt.DisableZip, "disable-zip", false, "Disable zip download of directories")
	cmdserve.Command.AddCommand(Command)
	cmdserve.AddRc("http", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
		// Read VFS Opts
@@ -259,24 +257,6 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
		return
	}
	dir := node.(*vfs.Dir)

	if r.URL.Query().Get("download") == "zip" && !s.opt.DisableZip {
		fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr)
		zipName := path.Base(dirRemote)
		if dirRemote == "" {
			zipName = "root"
		}
		w.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"")
		w.Header().Set("Content-Type", "application/zip")
		w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
		err := vfs.CreateZip(ctx, dir, w)
		if err != nil {
			serve.Error(ctx, dirRemote, w, "Failed to create zip", err)
			return
		}
		return
	}

	dirEntries, err := dir.ReadDirAll()
	if err != nil {
		serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
@@ -300,8 +280,6 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
	// Set the Last-Modified header to the timestamp
	w.Header().Set("Last-Modified", dir.ModTime().UTC().Format(http.TimeFormat))

	directory.DisableZip = s.opt.DisableZip

	directory.Serve(w, r)
}

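The branch removed above streamed a directory as a zip archive when the request carried `?download=zip`. Assuming an `rclone serve http` instance that still supports this feature (for example on `localhost:8080`, both hypothetical here), a client fetch would look roughly like this sketch:

```go
package main

import (
	"io"
	"net/http"
	"os"
)

func main() {
	// Hypothetical local server; the path and query parameter mirror
	// the handler shown in the hunk above.
	resp, err := http.Get("http://localhost:8080/three/?download=zip")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := os.Create("three.zip")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// Stream the archive to disk, just as the server streams it out.
	if _, err := io.Copy(out, resp.Body); err != nil {
		panic(err)
	}
}
```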
@@ -4,7 +4,6 @@ import (
	"context"
	"flag"
	"io"
	stdfs "io/fs"
	"net/http"
	"os"
	"path/filepath"
@@ -76,16 +75,6 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
	return s, testURL
}

// setAllModTimes walks root and sets atime/mtime to t for every file & directory.
func setAllModTimes(root string, t time.Time) error {
	return filepath.WalkDir(root, func(path string, d stdfs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		return os.Chtimes(path, t, t)
	})
}

var (
	datedObject  = "two.txt"
	expectedTime = time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
@@ -134,8 +123,6 @@ func testGET(t *testing.T, useProxy bool) {

		f = nil
	} else {
		// set all the mod times to expectedTime
		require.NoError(t, setAllModTimes("testdata/files", expectedTime))
		// Create a test Fs
		var err error
		f, err = fs.NewFs(context.Background(), "testdata/files")
@@ -246,16 +233,6 @@ func testGET(t *testing.T, useProxy bool) {
			Range:  "bytes=3-",
			Golden: "testdata/golden/two3-.txt",
		},
		{
			URL:    "/?download=zip",
			Status: http.StatusOK,
			Golden: "testdata/golden/root.zip",
		},
		{
			URL:    "/three/?download=zip",
			Status: http.StatusOK,
			Golden: "testdata/golden/three.zip",
		},
	} {
		method := test.Method
		if method == "" {

BIN cmd/serve/http/testdata/golden/root.zip vendored: Binary file not shown.
BIN cmd/serve/http/testdata/golden/three.zip vendored: Binary file not shown.
@@ -66,6 +66,7 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
func testCacheThrashDifferent(t *testing.T, h *Handler, c Cache) {
	var wg sync.WaitGroup
	for i := range 100 {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
@@ -124,6 +125,7 @@ func TestCache(t *testing.T) {
	}()
	billyFS := &FS{nil} // place holder billyFS
	for _, cacheType := range []handleCache{cacheMemory, cacheDisk, cacheSymlink} {
		cacheType := cacheType
		t.Run(cacheType.String(), func(t *testing.T) {
			h := &Handler{
				vfs: vfs.New(object.MemoryFs, nil),

@@ -182,7 +182,7 @@ func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) {
	// Obscure any values in the config map that need it
	obscureFields, ok := config.Get("_obscure")
	if ok {
		for key := range strings.SplitSeq(obscureFields, ",") {
		for _, key := range strings.Split(obscureFields, ",") {
			value, ok := config.Get(key)
			if ok {
				obscuredValue, err := obscure.Obscure(value)
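The `_obscure` field names comma-separated config keys whose values are obscured before storage. A minimal sketch of the round trip through rclone's `obscure` package (a standalone program, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	// Obscure a value the way the proxy does for fields listed in "_obscure".
	enc, err := obscure.Obscure("secret password")
	if err != nil {
		panic(err)
	}
	// Reveal reverses it; obscuring is reversible encoding, not real encryption.
	dec, err := obscure.Reveal(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(enc)
	fmt.Println(dec) // secret password
}
```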
@@ -34,7 +34,7 @@ func (r *results) checkBase32768() {
	// Create test files
	for _, c := range safeAlphabet {
		var out strings.Builder
		for i := range rune(32) {
		for i := rune(0); i < 32; i++ {
			out.WriteRune(c + i)
		}
		fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))

@@ -292,7 +292,7 @@ func (r *results) checkControls() {
		tokens <- struct{}{}
	}
	var wg sync.WaitGroup
	for i := range rune(128) {
	for i := rune(0); i < 128; i++ {
		s := string(i)
		if i == 0 || i == '/' {
			// We're not even going to check NULL or /

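Same backport flavour as the `SplitSeq` hunks: `for i := range rune(128)` is the Go 1.22 range-over-integer form, which this branch appears to rewrite as a classic counted loop. Both yield 0 through n-1; a tiny sketch:

```go
package main

import "fmt"

func main() {
	// Go 1.22+ range-over-integer form (rune is an integer type)...
	for i := range rune(3) {
		fmt.Println(i)
	}
	// ...and the equivalent classic loop used on the stable branch.
	for i := rune(0); i < 3; i++ {
		fmt.Println(i)
	}
}
```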
@@ -95,7 +95,7 @@ func (e *Position) UnmarshalText(text []byte) error {
	switch s := strings.ToLower(string(text)); s {
	default:
		*e = PositionNone
		for p := range strings.SplitSeq(s, ",") {
		for _, p := range strings.Split(s, ",") {
			switch p {
			case "left":
				*e |= PositionLeft

@@ -61,18 +61,15 @@ func init() {
	test.Command.AddCommand(makefileCmd)
	makefileFlags := makefileCmd.Flags()

	addCommonFlags(makefilesFlags)
	addCommonFlags(makefileFlags)
}

// Common flags for makefiles and makefile
func addCommonFlags(f *pflag.FlagSet) {
	flags.Int64VarP(f, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)", "")
	flags.BoolVarP(f, &zero, "zero", "", zero, "Fill files with ASCII 0x00", "")
	flags.BoolVarP(f, &sparse, "sparse", "", sparse, "Make the files sparse (appear to be filled with ASCII 0x00)", "")
	flags.BoolVarP(f, &ascii, "ascii", "", ascii, "Fill files with random ASCII printable bytes only", "")
	flags.BoolVarP(f, &pattern, "pattern", "", pattern, "Fill files with a periodic pattern", "")
	flags.BoolVarP(f, &chargen, "chargen", "", chargen, "Fill files with an ASCII chargen pattern", "")
// Common flags to makefiles and makefile
for _, f := range []*pflag.FlagSet{makefilesFlags, makefileFlags} {
	flags.Int64VarP(f, &seed, "seed", "", seed, "Seed for the random number generator (0 for random)", "")
	flags.BoolVarP(f, &zero, "zero", "", zero, "Fill files with ASCII 0x00", "")
	flags.BoolVarP(f, &sparse, "sparse", "", sparse, "Make the files sparse (appear to be filled with ASCII 0x00)", "")
	flags.BoolVarP(f, &ascii, "ascii", "", ascii, "Fill files with random ASCII printable bytes only", "")
	flags.BoolVarP(f, &pattern, "pattern", "", pattern, "Fill files with a periodic pattern", "")
	flags.BoolVarP(f, &chargen, "chargen", "", chargen, "Fill files with an ASCII chargen pattern", "")
}
}

var makefilesCmd = &cobra.Command{

@@ -126,24 +123,20 @@ var makefileCmd = &cobra.Command{
		if err != nil {
			fs.Fatalf(nil, "Failed to parse size %q: %v", args[0], err)
		}
		makefiles(size, args[1:])
		start := time.Now()
		fs.Logf(nil, "Creating %d files of size %v.", len(args[1:]), size)
		totalBytes := int64(0)
		for _, filePath := range args[1:] {
			dir := filepath.Dir(filePath)
			name := filepath.Base(filePath)
			writeFile(dir, name, int64(size))
			totalBytes += int64(size)
		}
		dt := time.Since(start)
		fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
	},
}

func makefiles(size fs.SizeSuffix, files []string) {
	start := time.Now()
	fs.Logf(nil, "Creating %d files of size %v.", len(files), size)
	totalBytes := int64(0)
	for _, filePath := range files {
		dir := filepath.Dir(filePath)
		name := filepath.Base(filePath)
		writeFile(dir, name, int64(size))
		totalBytes += int64(size)
	}
	dt := time.Since(start)
	fs.Logf(nil, "Written %vB in %v at %vB/s.", fs.SizeSuffix(totalBytes), dt.Round(time.Millisecond), fs.SizeSuffix((totalBytes*int64(time.Second))/int64(dt)))
}

func bool2int(b bool) int {
	if b {
		return 1

@@ -1,235 +0,0 @@
package makefiles

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"time"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/test"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/sync"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/random"
	"github.com/spf13/cobra"
)

var (
	// Flags
	testTime = fs.Duration(15 * time.Second)
	fcap     = 100
	small    = fs.SizeSuffix(1024)
	medium   = fs.SizeSuffix(10 * 1024 * 1024)
	large    = fs.SizeSuffix(1024 * 1024 * 1024)
	useJSON  = false
)

func init() {
	test.Command.AddCommand(speedCmd)

	speedFlags := speedCmd.Flags()
	flags.FVarP(speedFlags, &testTime, "test-time", "", "Length for each test to run", "")
	flags.IntVarP(speedFlags, &fcap, "file-cap", "", fcap, "Maximum number of files to use in each test", "")
	flags.FVarP(speedFlags, &small, "small", "", "Size of small files", "")
	flags.FVarP(speedFlags, &medium, "medium", "", "Size of medium files", "")
	flags.FVarP(speedFlags, &large, "large", "", "Size of large files", "")
	flags.BoolVarP(speedFlags, &useJSON, "json", "", useJSON, "Output only results in JSON format", "")

	addCommonFlags(speedFlags)
}

func logf(text string, args ...any) {
	if !useJSON {
		fmt.Printf(text, args...)
	}
}

var speedCmd = &cobra.Command{
	Use:   "speed <remote> [flags]",
	Short: `Run a speed test to the remote`,
	Long: `Run a speed test to the remote.

This command runs a series of uploads and downloads to the remote, measuring
and printing the speed of each test using varying file sizes and numbers of
files.

Test time can be inaccurate with small file caps and large files, as it
uses the results of an initial test to determine how many files to use in
each subsequent test.

It is recommended to use the -q flag for a simpler output, e.g.:

    rclone test speed remote: -q

**NB** This command will create and delete files on the remote in a randomly
named directory which should be tidied up after.

You can use the --json flag to only print the results in JSON format.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.72",
	},
	RunE: func(command *cobra.Command, args []string) error {
		ctx := command.Context()
		cmd.CheckArgs(1, 1, command, args)
		commonInit()

		// initial test
		size := fs.SizeSuffix(1024 * 1024)
		logf("Running initial test for 4 files of size %v\n", size)
		stats, err := speedTest(ctx, 4, size, args[0])
		if err != nil {
			return fmt.Errorf("speed test failed: %w", err)
		}

		var results []*Stats

		// main tests
		logf("\nTest Time: %v, File cap: %d\n", testTime, fcap)
		for _, size := range []fs.SizeSuffix{small, medium, large} {
			numberOfFilesUpload := int((float64(stats.Upload.Speed) * time.Duration(testTime).Seconds()) / float64(size))
			numberOfFilesDownload := int((float64(stats.Download.Speed) * time.Duration(testTime).Seconds()) / float64(size))
			numberOfFiles := min(numberOfFilesUpload, numberOfFilesDownload)

			logf("\nNumber of files for upload and download: %v\n", numberOfFiles)
			if numberOfFiles < 1 {
				logf("Skipping test for file size %v as calculated number of files is 0\n", size)
				continue
			} else if numberOfFiles > fcap {
				numberOfFiles = fcap
				logf("Capping test for file size %v to %v files\n", size, fcap)
			}

			logf("Running test for %d files of size %v\n", numberOfFiles, size)
			s, err := speedTest(ctx, numberOfFiles, size, args[0])
			if err != nil {
				return fmt.Errorf("speed test failed: %w", err)
			}
			results = append(results, s)
		}

		if useJSON {
			b, err := json.MarshalIndent(results, "", " ")
			if err != nil {
				return fmt.Errorf("failed to marshal results to JSON: %w", err)
			}
			fmt.Println(string(b))
		}

		return nil
	},
}

// Stats of a speed test
type Stats struct {
	Size          fs.SizeSuffix
	NumberOfFiles int
	Upload        TestResult
	Download      TestResult
}

// TestResult of a speed test operation
type TestResult struct {
	Bytes    int64
	Duration time.Duration
	Speed    fs.SizeSuffix
}

// measures stats for speedTest operations
func measure(desc string, f func() error, size fs.SizeSuffix, numberOfFiles int, tr *TestResult) error {
	start := time.Now()
	err := f()
	dt := time.Since(start)
	if err != nil {
		return err
	}
	tr.Duration = dt
	tr.Bytes = int64(size) * int64(numberOfFiles)
	tr.Speed = fs.SizeSuffix(float64(tr.Bytes) / dt.Seconds())
	logf("%-20s: %vB in %v at %vB/s\n", desc, tr.Bytes, dt.Round(time.Millisecond), tr.Speed)
	return err
}

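As a worked example of the throughput arithmetic in `measure` (the numbers here are illustrative only): 100 files of 1 MiB uploaded in 5 s gives `tr.Bytes = 104857600` and a speed of about 20 MiB/s.

```go
package main

import "fmt"

func main() {
	const bytes = int64(100 * 1024 * 1024) // 100 files x 1 MiB
	const seconds = 5.0                    // measured wall-clock duration
	speed := float64(bytes) / seconds
	fmt.Printf("%.0f bytes/s (~%.0f MiB/s)\n", speed, speed/(1024*1024))
}
```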
func speedTest(ctx context.Context, numberOfFiles int, size fs.SizeSuffix, remote string) (*Stats, error) {
	stats := Stats{
		Size:          size,
		NumberOfFiles: numberOfFiles,
	}

	tempDirName := "rclone-speed-test-" + random.String(8)
	tempDirPath := path.Join(remote, tempDirName)
	fremote := cmd.NewFsDir([]string{tempDirPath})
	aErr := io.EOF
	defer atexit.OnError(&aErr, func() {
		err := operations.Purge(ctx, fremote, "")
		if err != nil {
			fs.Debugf(fremote, "Failed to remove temp dir %q: %v", tempDirPath, err)
		}
	})()

	flocalDir, err := os.MkdirTemp("", "rclone-speedtest-local-")
	if err != nil {
		return nil, fmt.Errorf("failed to create local temp dir: %w", err)
	}
	defer atexit.OnError(&aErr, func() { _ = os.RemoveAll(flocalDir) })()

	flocal, err := cache.Get(ctx, flocalDir)
	if err != nil {
		return nil, fmt.Errorf("failed to create local fs: %w", err)
	}

	fdownloadDir, err := os.MkdirTemp("", "rclone-speedtest-download-")
	if err != nil {
		return nil, fmt.Errorf("failed to create download temp dir: %w", err)
	}
	defer atexit.OnError(&aErr, func() { _ = os.RemoveAll(fdownloadDir) })()

	fdownload, err := cache.Get(ctx, fdownloadDir)
	if err != nil {
		return nil, fmt.Errorf("failed to create download fs: %w", err)
	}

	// make the largest amount of files we will need
	files := make([]string, numberOfFiles)
	for i := range files {
		files[i] = path.Join(flocalDir, fmt.Sprintf("file%03d-%v.bin", i, size))
	}
	makefiles(size, files)

	// upload files
	err = measure("Upload", func() error {
		return sync.CopyDir(ctx, fremote, flocal, false)
	}, size, numberOfFiles, &stats.Upload)
	if err != nil {
		return nil, fmt.Errorf("failed to Copy to remote: %w", err)
	}

	// download files
	err = measure("Download", func() error {
		return sync.CopyDir(ctx, fdownload, fremote, false)
	}, size, numberOfFiles, &stats.Download)
	if err != nil {
		return nil, fmt.Errorf("failed to Copy from remote: %w", err)
	}

	// check files
	opt := operations.CheckOpt{
		Fsrc:   flocal,
		Fdst:   fdownload,
		OneWay: false,
	}
	logf("Checking file integrity\n")
	err = operations.CheckDownload(ctx, &opt)
	if err != nil {
		return nil, fmt.Errorf("failed to check redownloaded files were identical: %w", err)
	}

	return &stats, nil
}
@@ -351,7 +351,7 @@ func TestEnvironmentVariables(t *testing.T) {
	parseFileFilters := func(out string) (extensions []string) {
		// Match: - (^|/)[^/]*\.jpg$
		find := regexp.MustCompile(`^- \(\^\|\/\)\[\^\/\]\*\\\.(.*?)\$$`)
		for line := range strings.SplitSeq(out, "\n") {
		for _, line := range strings.Split(out, "\n") {
			if m := find.FindStringSubmatch(line); m != nil {
				extensions = append(extensions, m[1])
			}

@@ -136,13 +136,13 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}}
{{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}}
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
{{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}}
{{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}}
{{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}}
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}}
{{< provider name="Intercolo Object Storage" home="https://intercolo.de/object-storage" config="/s3/#intercolo" >}}
{{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}}
{{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
{{< provider name="Leviia Object Storage" home="https://www.leviia.com/object-storage" config="/s3/#leviia" >}}
@@ -179,7 +179,9 @@ WebDAV or S3, that work out of the box.)
{{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
{{< provider name="Quatrix by Maytech" home="https://www.maytech.net/products/quatrix-business" config="/quatrix/" >}}
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
{{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}}
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}}
{{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
@@ -189,7 +191,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
{{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
{{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}}
{{< provider name="Synology" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
@@ -990,7 +990,7 @@ put them back in again.` >}}
- Ross Smith II <ross@smithii.com>
- Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com>
- Sudipto Baral <sudiptobaral.me@gmail.com>
- Sam Pegg <samrpegg@gmail.com> <70067376+S-Pegg1@users.noreply.github.com>
- Sam Pegg <samrpegg@gmail.com>
- liubingrun <liubr1@chinatelecom.cn>
- Albin Parou <fumesover@gmail.com>
- n4n5 <56606507+Its-Just-Nans@users.noreply.github.com>
@@ -1004,15 +1004,3 @@ put them back in again.` >}}
- Lucas Bremgartner <breml@users.noreply.github.com>
- Binbin Qian <qianbinbin@hotmail.com>
- cui <523516579@qq.com>
- Tilman Vogel <tilman.vogel@web.de>
- skbeh <60107333+skbeh@users.noreply.github.com>
- Claudius Ellsel <claudius.ellsel@live.de>
- Motte <37443982+dmotte@users.noreply.github.com>
- dougal <dougal.craigwood@gmail.com> <147946567+roucc@users.noreply.github.com>
- anon-pradip <pradipsubedi360@gmail.com>
- Robin Rolf <imer@imer.cc>
- Jean-Christophe Cura <jcaspes@gmail.com>
- russcoss <russcoss@outlook.com>
- Matt LaPaglia <mlapaglia@gmail.com>
- Youfu Zhang <1315097+zhangyoufu@users.noreply.github.com>
- juejinyuxitu <juejinyuxitu@outlook.com>
@@ -1046,7 +1046,7 @@ encodings.)

The following backends have known issues that need more investigation:

<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->
<!--- start list_failures - DO NOT EDIT THIS SECTION - use rclone gendocs --->
- `TestGoFile` (`gofile`)
  - [`TestBisyncRemoteLocal/all_changed`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
  - [`TestBisyncRemoteLocal/backupdir`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
@@ -1055,12 +1055,12 @@ The following backends have known issues that need more investigation:
  - [`TestBisyncRemoteLocal/check_access`](https://pub.rclone.org/integration-tests/current/gofile-cmd.bisync-TestGoFile-1.txt)
  - [78 more](https://pub.rclone.org/integration-tests/current/)
  - Updated: 2025-08-21-010015
<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->
<!--- end list_failures - DO NOT EDIT THIS SECTION - use rclone gendocs --->

The following backends either have not been tested recently or have known issues
that are deemed unfixable for the time being:

<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->
<!--- start list_ignores - DO NOT EDIT THIS SECTION - use rclone gendocs --->
- `TestCache` (`cache`)
- `TestFileLu` (`filelu`)
- `TestFilesCom` (`filescom`)
@@ -1085,7 +1085,7 @@ that are deemed unfixable for the time being:
- `TestWebdavNextcloud` (`webdav`)
- `TestWebdavOwncloud` (`webdav`)
- `TestnStorage` (`netstorage`)
<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->
<!--- end list_ignores - DO NOT EDIT THIS SECTION - use rclone gendocs --->
([more info](https://github.com/rclone/rclone/blob/master/fstest/test_all/config.yaml))

The above lists are updated for each stable release of rclone. For test results
@@ -6,6 +6,29 @@ description: "Rclone Changelog"

# Changelog

## v1.71.2 - 2025-10-20

[See commits](https://github.com/rclone/rclone/compare/v1.71.1...v1.71.2)

- Bug Fixes
  - build
    - update Go to 1.25.3
    - Update Docker image Alpine version to fix CVE-2025-9230
  - bisync: Fix race when CaptureOutput is used concurrently (Nick Craig-Wood)
  - doc fixes (albertony, dougal, iTrooz, Matt LaPaglia, Nick Craig-Wood)
  - index: Add missing providers (dougal)
  - serve http: Fix: logging URL on start (dougal)
- Azurefiles
  - Fix server side copy not waiting for completion (Vikas Bhansali)
- B2
  - Fix 1TB+ uploads (dougal)
- Google Cloud Storage
  - Add region us-east5 (Dulani Woods)
- Mega
  - Fix 402 payment required errors (Nick Craig-Wood)
- Pikpak
  - Fix unnecessary retries by using URL expire parameter (Youfu Zhang)

## v1.71.1 - 2025-09-24

[See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.71.1)
@@ -15,8 +15,6 @@ mounting them, listing them in lots of different ways.

See the home page (https://rclone.org/) for installation, usage,
documentation, changelog and configuration walkthroughs.


```
rclone [flags]
```

@@ -1015,7 +1013,7 @@ rclone [flags]
      --use-json-log            Use json log format
      --use-mmap                Use mmap allocator (see docs)
      --use-server-modtime      Use server modified time instead of object metadata
      --user-agent string       Set the user-agent to a specified string (default "rclone/v1.71.0")
      --user-agent string       Set the user-agent to a specified string (default "rclone/v1.71.2")
  -v, --verbose count           Print lots more stuff (repeat for more)
  -V, --version                 Print the version number
      --webdav-auth-redirect    Preserve authentication on redirect

@@ -1057,6 +1055,9 @@ rclone [flags]

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone about](/commands/rclone_about/) - Get quota information from the remote.
* [rclone authorize](/commands/rclone_authorize/) - Remote authorization.
* [rclone backend](/commands/rclone_backend/) - Run a backend-specific command.

@@ -1111,3 +1112,5 @@ rclone [flags]
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
* [rclone version](/commands/rclone_version/) - Show the version number.


<!-- markdownlint-restore -->
@@ -15,40 +15,46 @@ output. The output is typically used, free, quota and trash contents.

E.g. Typical output from `rclone about remote:` is:

    Total:   17 GiB
    Used:    7.444 GiB
    Free:    1.315 GiB
    Trashed: 100.000 MiB
    Other:   8.241 GiB
```text
Total:   17 GiB
Used:    7.444 GiB
Free:    1.315 GiB
Trashed: 100.000 MiB
Other:   8.241 GiB
```

Where the fields are:

* Total: Total size available.
* Used: Total size used.
* Free: Total space available to this user.
* Trashed: Total space used by trash.
* Other: Total amount in other storage (e.g. Gmail, Google Photos).
* Objects: Total number of objects in the storage.
- Total: Total size available.
- Used: Total size used.
- Free: Total space available to this user.
- Trashed: Total space used by trash.
- Other: Total amount in other storage (e.g. Gmail, Google Photos).
- Objects: Total number of objects in the storage.

All sizes are in number of bytes.

Applying a `--full` flag to the command prints the bytes in full, e.g.

    Total:   18253611008
    Used:    7993453766
    Free:    1411001220
    Trashed: 104857602
    Other:   8849156022
```text
Total:   18253611008
Used:    7993453766
Free:    1411001220
Trashed: 104857602
Other:   8849156022
```

A `--json` flag generates conveniently machine-readable output, e.g.

    {
        "total": 18253611008,
        "used": 7993453766,
        "trashed": 104857602,
        "other": 8849156022,
        "free": 1411001220
    }
```json
{
  "total": 18253611008,
  "used": 7993453766,
  "trashed": 104857602,
  "other": 8849156022,
  "free": 1411001220
}
```

Not all backends print all fields. Information is not included if it is not
provided by a backend. Where the value is unlimited it is omitted.

@@ -56,7 +62,6 @@ provided by a backend. Where the value is unlimited it is omitted.

Some backends do not support the `rclone about` command at all,
see complete list in [documentation](https://rclone.org/overview/#optional-features).

```
rclone about remote: [flags]
```

@@ -73,5 +78,10 @@ See the [global flags page](/flags/) for global options not listed here.

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.


<!-- markdownlint-restore -->
@@ -11,21 +11,23 @@ Remote authorization.
## Synopsis

Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.
rclone from a machine with a browser. Use as instructed by rclone config.
See also the [remote setup documentation](/remote_setup).

The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

- Name of a backend (e.g. "drive", "s3")
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.
Use --template to generate HTML output via a custom Go template. If a blank
string is provided as an argument to this flag, the default template is used.

```
rclone authorize <fs name> [base64_json_blob | client_id client_secret] [flags]
rclone authorize <backendname> [base64_json_blob | client_id client_secret] [flags]
```

## Options

@@ -40,5 +42,10 @@ See the [global flags page](/flags/) for global options not listed here.

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.


<!-- markdownlint-restore -->
@@ -16,27 +16,34 @@ see the backend docs for definitions.

You can discover what commands a backend implements by using

    rclone backend help remote:
    rclone backend help <backendname>
```sh
rclone backend help remote:
rclone backend help <backendname>
```

You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
for more info).

    rclone backend features remote:
```sh
rclone backend features remote:
```

Pass options to the backend command with -o. This should be key=value or key, e.g.:

    rclone backend stats remote:path stats -o format=json -o long
```sh
rclone backend stats remote:path stats -o format=json -o long
```

Pass arguments to the backend by placing them on the end of the line

    rclone backend cleanup remote:path file1 file2 file3
```sh
rclone backend cleanup remote:path file1 file2 file3
```

Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs.


```
rclone backend <command> remote:path [opts] <args> [flags]
```

@@ -56,7 +63,7 @@ See the [global flags page](/flags/) for global options not listed here.

Important flags useful for most commands

```
```text
  -n, --dry-run         Do a trial run with no permanent changes
  -i, --interactive     Enable interactive mode
  -v, --verbose count   Print lots more stuff (repeat for more)

@@ -64,5 +71,10 @@ Important flags useful for most commands

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.


<!-- markdownlint-restore -->
@@ -16,18 +16,19 @@ Perform bidirectional synchronization between two paths.

bidirectional cloud sync solution in rclone.
It retains the Path1 and Path2 filesystem listings from the prior run.
On each successive run it will:

- list files on Path1 and Path2, and check for changes on each side.
  Changes include `New`, `Newer`, `Older`, and `Deleted` files.
- Propagate changes on Path1 to Path2, and vice-versa.

Bisync is considered an **advanced command**, so use with care.
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
(especially the [Limitations](https://rclone.org/bisync/#limitations) section)
before using, or data loss can result. Questions can be asked in the
[Rclone Forum](https://forum.rclone.org/).

See [full bisync description](https://rclone.org/bisync/) for details.


```
rclone bisync remote1:path1 remote2:path2 [flags]
```

@@ -69,7 +70,7 @@ See the [global flags page](/flags/) for global options not listed here.

Flags for anything which can copy a file

```
```text
      --check-first                Do all the checks before starting transfers
  -c, --checksum                   Check for changes with size & checksum (if available, or fallback to size only)
      --compare-dest stringArray   Include additional server-side paths during comparison

@@ -110,7 +111,7 @@ Flags for anything which can copy a file

Important flags useful for most commands

```
```text
  -n, --dry-run         Do a trial run with no permanent changes
  -i, --interactive     Enable interactive mode
  -v, --verbose count   Print lots more stuff (repeat for more)

@@ -120,7 +121,7 @@ Important flags useful for most commands

Flags for filtering directory listings

```
```text
      --delete-excluded            Delete files on dest excluded from sync
      --exclude stringArray        Exclude files matching pattern
      --exclude-from stringArray   Read file exclude patterns from file (use - to read from stdin)

@@ -148,5 +149,10 @@ Flags for filtering directory listings

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.


<!-- markdownlint-restore -->
@@ -14,15 +14,21 @@ Sends any files to standard output.

You can use it like this to output a single file

    rclone cat remote:path/to/file
```sh
rclone cat remote:path/to/file
```

Or like this to output any file in dir or its subdirectories.

    rclone cat remote:path/to/dir
```sh
rclone cat remote:path/to/dir
```

Or like this to output any .txt files in dir or its subdirectories.

    rclone --include "*.txt" cat remote:path/to/dir
```sh
rclone --include "*.txt" cat remote:path/to/dir
```

Use the `--head` flag to print characters only at the start, `--tail` for
the end and `--offset` and `--count` to print a section in the middle.

@@ -33,14 +39,17 @@ Use the `--separator` flag to print a separator value between files. Be sure to
shell-escape special characters. For example, to print a newline between
files, use:

* bash:
- bash:

    rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
```sh
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
```

* powershell:

    rclone --include "*.txt" --separator "`n" cat remote:path/to/dir
- powershell:

```powershell
rclone --include "*.txt" --separator "`n" cat remote:path/to/dir
```

```
rclone cat remote:path [flags]
@@ -65,7 +74,7 @@ See the [global flags page](/flags/) for global options not listed here.

Flags for filtering directory listings

```
```text
      --delete-excluded            Delete files on dest excluded from sync
      --exclude stringArray        Exclude files matching pattern
      --exclude-from stringArray   Read file exclude patterns from file (use - to read from stdin)

@@ -95,12 +104,17 @@ Flags for filtering directory listings

Flags for listing directories

```
```text
      --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
      --fast-list           Use recursive list if available; uses more memory but fewer transactions
```

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.


<!-- markdownlint-restore -->
@@ -52,7 +52,6 @@ you what happened to it. These are reminiscent of diff files.

The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information.


```
rclone check source:path dest:path [flags]
```

@@ -79,7 +78,7 @@ See the [global flags page](/flags/) for global options not listed here.

Flags used for check commands

```
```text
      --max-backlog int   Maximum number of objects in sync or check backlog (default 10000)
```

@@ -87,7 +86,7 @@ Flags used for check commands

Flags for filtering directory listings

```
```text
      --delete-excluded            Delete files on dest excluded from sync
      --exclude stringArray        Exclude files matching pattern
      --exclude-from stringArray   Read file exclude patterns from file (use - to read from stdin)

@@ -117,12 +116,17 @@ Flags for filtering directory listings

Flags for listing directories

```
```text
      --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
      --fast-list           Use recursive list if available; uses more memory but fewer transactions
```

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.


<!-- markdownlint-restore -->
@@ -47,7 +47,6 @@ you what happened to it. These are reminiscent of diff files.

The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information.


```
rclone checksum <hash> sumfile dst:path [flags]
```

@@ -73,7 +72,7 @@ See the [global flags page](/flags/) for global options not listed here.

Flags for filtering directory listings

```
```text
      --delete-excluded            Delete files on dest excluded from sync
      --exclude stringArray        Exclude files matching pattern
      --exclude-from stringArray   Read file exclude patterns from file (use - to read from stdin)

@@ -103,12 +102,17 @@ Flags for filtering directory listings

Flags for listing directories

```
```text
      --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
      --fast-list           Use recursive list if available; uses more memory but fewer transactions
```

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.


<!-- markdownlint-restore -->
@@ -13,7 +13,6 @@ Clean up the remote if possible.

Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.


```
rclone cleanup remote:path [flags]
```

@@ -31,7 +30,7 @@ See the [global flags page](/flags/) for global options not listed here.

Important flags useful for most commands

```
```text
  -n, --dry-run         Do a trial run with no permanent changes
  -i, --interactive     Enable interactive mode
  -v, --verbose count   Print lots more stuff (repeat for more)

@@ -39,5 +38,10 @@ Important flags useful for most commands

## See Also

<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->

* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.


<!-- markdownlint-restore -->
Some files were not shown because too many files have changed in this diff.