Mirror of https://github.com/rclone/rclone.git, synced 2026-01-04 17:43:50 +00:00
Compare commits
158 Commits
Commit SHA1s:

ab60a77aba, 09535a06f7, 173b720173, 6660e6ec7c, 14c604335e, bfcb23b7b2, 46dbdb8cb7, 17932fcc38,
77faa787e1, 0701dd55cd, 1903b4c1a2, f7cbcf556f, 3581e628c0, 62c41bf449, c5864e113b, 39259a5bd1,
2e376eb3b9, de8e9d4693, 710cf49bc6, 8dacac60ea, 3a80d4d4b4, a531f987a8, e906b8d0c4, a5932ef91a,
3afa563eaf, 9d9654b31f, cfe257f13d, 0375efbd35, cad1954213, 604e37caa5, b249d384b9, 04e91838db,
94829aaec5, f574e3395c, 2bc155a96a, adc8ea3427, 068eea025c, 4510aa679a, 79281354c7, f57a178719,
44f2e2ed39, 13e1752d94, bb82c0e43b, 1af7151e73, fd63478ed6, 5133b05c74, 6ba96ede4b, 2896973964,
be123d85ff, b1b9562ab7, 5146b66569, 8898372d5a, 091fe9e453, 8fdb68e41a, c124aa2ed3, 54e8bb89f7,
50c1b594ab, 72437a9ca2, 8ed55c61e1, bd598c1ceb, 7e30665102, d44957a09c, 37524e2dea, 2f6a6c8233,
4ad40b6554, 4f33d64f25, 519623d9f1, 913278327b, a9b05e4c7a, 5d6d79e7d4, 11de074cbf, e9ab177a32,
f3f4fba98d, 03fccdd67b, 231083647e, 0e203a7546, a7dd787569, 689555033e, 4fc4898287, b003169088,
babd112665, 71b9b4ad7a, 4368863fcb, 04d49bf0ea, d7aa37d263, 379dffa61c, 5fd4ece31f, fc3f95190b,
d6f5652b65, b5cbb7520d, a170dfa55b, 1449c5b5ba, 35fe609722, cce399515f, 8c5af2f51c, c639d3656e,
d9fbbba5c3, fd87560388, d87720a787, d541caa52b, fd1665ae93, 457d80e8a9, c5a3e86df8, 4026e8db20,
c9ce686231, b085598cbc, bb47dccdeb, 7a279d2789, 9bd5df658a, d512e4d566, 3dd68c824a, fbe73c993b,
d915f75edf, 26b629f42f, ceaac2194c, 1f14b6aa35, dd75af6a18, 99e8a63df2, 0019e18ac3, 218c3bf6e9,
8f9702583d, e6578fb5a1, fa1d7da272, 813708c24d, fee4716343, 6e9a675b3f, 7f5a444350, d2916ac5c7,
3369a15285, 58aee30de7, ef919241a6, d5386bb9a7, bf46ea5611, b8a379c9c9, 8c37a9c2ef, 963a72ce01,
a4962e21d1, 9e200531b1, 04683f2032, b41f7994da, 13a5ffe391, 85deea82e4, 89a8ea7a91, c8912eb6a0,
01674949a1, 98e1d3ee73, 50d7a80331, bc3e8e1abd, 30e80d0716, f288920696, fa2bbd705c, 43a794860f,
adfe6b3bad, 091ccb649c, 2e02d49578, 514535ad46, b010591c96, 1aaee9edce
.github/workflows/build.yml (vendored, 8 lines changed)
```diff
@@ -100,7 +100,7 @@ jobs:
           fetch-depth: 0

       - name: Install Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version: ${{ matrix.go }}
           check-latest: true
@@ -222,7 +222,7 @@ jobs:

       - name: Install Go
         id: setup-go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version: '>=1.24.0-rc.1'
           check-latest: true
@@ -291,7 +291,7 @@ jobs:
             README.md
             RELEASE.md
             CODE_OF_CONDUCT.md
-            docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
+            docs/content/{authors,bugs,changelog,cluster,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md

       - name: Scan edits of autogenerated files
         run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
@@ -311,7 +311,7 @@ jobs:

       # Upgrade together with NDK version
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version: '>=1.25.0-rc.1'
@@ -92,7 +92,7 @@ jobs:
         # There's no way around this, because "ImageOS" is only available to
         # processes, but the setup-go action uses it in its key.
         id: imageos
-        uses: actions/github-script@v7
+        uses: actions/github-script@v8
         with:
           result-encoding: string
           script: |
```
```diff
@@ -628,7 +628,7 @@ You'll need to modify the following files
 - `backend/s3/s3.go`
   - Add the provider to `providerOption` at the top of the file
   - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from generic config questions (eg `region` and `endpoint).
+  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
   - Add the provider to the `setQuirks` function - see the documentation there.
 - `docs/content/s3.md`
   - Add the provider at the top of the page.
```
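For orientation, a hedged sketch of what those `backend/s3/s3.go` edits tend to look like. The provider name, endpoint, and exact field set below are illustrative assumptions, so check them against the real `providerOption` and option definitions before copying anything:

```go
package s3sketch

import "github.com/rclone/rclone/fs"

// Hypothetical entry for the "provider" option's Examples list.
var exampleProviderChoice = fs.OptionExample{
	Value: "MyProvider", // assumed name, not a real provider
	Help:  "MyProvider Object Storage",
}

// Provider-gated endpoint option: shown only when provider == MyProvider,
// which also keeps MyProvider out of the generic endpoint question.
var exampleEndpointOption = fs.Option{
	Name:     "endpoint",
	Help:     "Endpoint for MyProvider Object Storage API.",
	Provider: "MyProvider",
	Examples: []fs.OptionExample{{
		Value: "s3.my-provider.example.com", // assumed endpoint
		Help:  "Default endpoint",
	}},
}
```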
Makefile (15 lines changed)
```diff
@@ -100,6 +100,7 @@ compiletest:
 check: rclone
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
 	@golangci-lint run $(LINTTAGS) ./...
+	@bin/markdown-lint
 	@echo "-- END CODE QUALITY REPORT ---------------------------------"

 # Get the build dependencies
@@ -113,21 +114,21 @@ release_dep_linux:
 # Update dependencies
 showupdates:
 	@echo "*** Direct dependencies that could be updated ***"
-	@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
+	@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

 # Update direct dependencies only
 updatedirect:
-	GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
-	GO111MODULE=on go mod tidy
+	go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
+	go mod tidy

 # Update direct and indirect dependencies and test dependencies
 update:
-	GO111MODULE=on go get -d -u -t ./...
-	GO111MODULE=on go mod tidy
+	go get -u -t ./...
+	go mod tidy

 # Tidy the module dependencies
 tidy:
-	GO111MODULE=on go mod tidy
+	go mod tidy

 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
@@ -144,9 +145,11 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

 commanddocs: rclone
+	go generate ./lib/transform
 	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
 	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
+	go run bin/make_bisync_docs.go ./docs/content/

 backenddocs: rclone bin/make_backend_docs.py
 	-@rmdir -p '$$HOME/.config/rclone'
```
```diff
@@ -50,6 +50,7 @@ directories to and from different cloud storage providers.
 - Google Drive [:page_facing_up:](https://rclone.org/drive/)
 - Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 - HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
+- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
 - Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
 - HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 - HTTP [:page_facing_up:](https://rclone.org/http/)
@@ -59,6 +60,7 @@ directories to and from different cloud storage providers.
 - Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 - Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 - IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
 - IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 - Koofr [:page_facing_up:](https://rclone.org/koofr/)
 - Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
@@ -94,6 +96,7 @@ directories to and from different cloud storage providers.
 - Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
 - QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 - Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
+- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
 - Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 - Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 - RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
@@ -105,6 +108,7 @@ directories to and from different cloud storage providers.
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
+- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
 - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 - Storj [:page_facing_up:](https://rclone.org/storj/)
 - SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
```
```diff
@@ -1338,9 +1338,9 @@ func (f *Fs) containerOK(container string) bool {
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
 	if !f.containerOK(containerName) {
-		return nil, fs.ErrorDirNotFound
+		return fs.ErrorDirNotFound
 	}
 	err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1348,16 +1348,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// container must be present if listing succeeded
 	f.cache.MarkOK(containerName)
-	return entries, nil
+	return nil
 }
@@ -1393,14 +1393,47 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	container, directory := f.split(dir)
 	if container == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listContainers(ctx)
+		entries, err := f.listContainers(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
+	return list.Flush()
 }
@@ -2119,7 +2152,6 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
 	}
 	metadata = make(map[string]*string, len(o.meta))
 	for k, v := range o.meta {
-		v := v
 		metadata[k] = &v
 	}
 	return metadata
@@ -2765,8 +2797,6 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
 		blockList  blockblob.GetBlockListResponse
 		properties *blob.GetPropertiesResponse
 		options    *blockblob.CommitBlockListOptions
-		// Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
-		pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
 	)

 	properties, err = o.readMetaDataAlways(ctx)
@@ -2778,7 +2808,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

 	if objectExists {
 		// Get the committed block list
-		err = pacer.Call(func() (bool, error) {
+		err = o.fs.pacer.Call(func() (bool, error) {
 			blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
 			return o.fs.shouldRetry(ctx, err)
 		})
@@ -2820,7 +2850,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

 	// Commit only the committed blocks
 	fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
-	err = pacer.Call(func() (bool, error) {
+	err = o.fs.pacer.Call(func() (bool, error) {
 		_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
 		return o.fs.shouldRetry(ctx, err)
 	})
@@ -3156,6 +3186,7 @@ var (
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.Purger          = &Fs{}
 	_ fs.ListRer         = &Fs{}
+	_ fs.ListPer         = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
 	_ fs.Object          = &Object{}
 	_ fs.MimeTyper       = &Object{}
```
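The same `List`-delegates-to-`ListP` refactor recurs below in the b2, Google Cloud Storage, and one further bucket-based backend. A condensed sketch of the shape, using the `fs/list` helpers the diffs themselves call (`WithListP`, `NewHelper`); the toy `listDir` is a stand-in for each backend's pager:

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/list"
)

// Fs is a stand-in for a bucket-based backend (hypothetical).
type Fs struct{}

// listDir would stream one directory's entries into the callback,
// calling it once per entry as the backend pages through results.
func (f *Fs) listDir(ctx context.Context, callback func(fs.DirEntry) error) error {
	// backend-specific paging loop goes here
	return nil
}

// List satisfies the classic interface by delegating to ListP.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries in tranches through the callback.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback)
	if err := f.listDir(ctx, helper.Add); err != nil {
		return err
	}
	return helper.Flush() // deliver any remaining buffered entries
}
```

The helper batches entries and `Flush` delivers the final partial tranche, so callers see the same callback contract as `ListR` without the backend buffering a whole directory in memory.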
```diff
@@ -1313,10 +1313,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	srcURL := srcObj.fileClient().URL()
 	fc := f.fileClient(remote)
-	_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
+	startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
 	if err != nil {
 		return nil, fmt.Errorf("Copy failed: %w", err)
 	}
+
+	// Poll for completion if necessary
+	//
+	// The for loop is never executed for same storage account copies.
+	copyStatus := startCopy.CopyStatus
+	var properties file.GetPropertiesResponse
+	pollTime := 100 * time.Millisecond
+	for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
+		time.Sleep(pollTime)
+		properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
+		if err != nil {
+			return nil, err
+		}
+		copyStatus = properties.CopyStatus
+		pollTime = min(2*pollTime, time.Second)
+	}
+
 	dstObj, err := f.NewObject(ctx, remote)
 	if err != nil {
 		return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
```
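The copy poll above doubles its sleep each round and caps it at one second. A generic sketch of that capped exponential polling, with `isPending` standing in for the backend's status probe (a hypothetical helper, not rclone API):

```go
package example

import (
	"context"
	"time"
)

// pollUntilDone sleeps with capped exponential backoff between status
// checks: 100ms, 200ms, 400ms, 800ms, then 1s thereafter.
func pollUntilDone(ctx context.Context, isPending func(context.Context) (bool, error)) error {
	pollTime := 100 * time.Millisecond
	for {
		pending, err := isPending(ctx)
		if err != nil || !pending {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(pollTime):
		}
		pollTime = min(2*pollTime, time.Second) // cap the backoff at 1s
	}
}
```

This keeps fast copies fast (the first check happens after only 100ms) while bounding the request rate on long-running ones.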
```diff
@@ -847,7 +847,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	last := ""
 	err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
@@ -855,16 +855,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return entries, nil
+	return nil
 }
@@ -890,14 +890,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listBuckets(ctx)
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }
@@ -2192,13 +2224,17 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 		return info, nil, err
 	}
+
+	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
+	if err != nil {
+		return info, nil, err
+	}
+
 	info = fs.ChunkWriterInfo{
-		ChunkSize:   int64(f.opt.ChunkSize),
+		ChunkSize:   up.chunkSize,
 		Concurrency: o.fs.opt.UploadConcurrency,
 		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
 	}
-	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
-	return info, up, err
+	return info, up, nil
 }
@@ -2428,6 +2464,7 @@ var (
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.CleanUpper      = &Fs{}
 	_ fs.ListRer         = &Fs{}
+	_ fs.ListPer         = &Fs{}
 	_ fs.PublicLinker    = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
 	_ fs.Commander       = &Fs{}
```
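The `OpenChunkWriter` reordering matters because `newLargeUpload` may settle on a chunk size different from the configured one, and `ChunkWriterInfo` must advertise the size actually used. A sketch of the kind of adjustment an uploader might make; `maxParts` is an assumed constraint for illustration, not b2's actual rule:

```go
package example

// effectiveChunkSize grows the configured chunk size until the file
// fits within a part-count limit. Real implementations round up
// rather than doubling; this just shows why the advertised size can
// differ from the configured one.
func effectiveChunkSize(size, configured, maxParts int64) int64 {
	chunk := configured
	for size/chunk >= maxParts { // need fewer, larger parts
		chunk *= 2
	}
	return chunk
}
```

Advertising `up.chunkSize` keeps the chunked-copy machinery and the uploader agreeing on part boundaries for very large files.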
```diff
@@ -125,10 +125,21 @@ type FolderItems struct {
 	Offset     int     `json:"offset"`
 	Limit      int     `json:"limit"`
 	NextMarker *string `json:"next_marker,omitempty"`
-	Order      []struct {
-		By        string `json:"by"`
-		Direction string `json:"direction"`
-	} `json:"order"`
+	// There is some confusion about how this is actually
+	// returned. The []struct has worked for many years, but in
+	// https://github.com/rclone/rclone/issues/8776 box was
+	// returning it not as a list. We don't actually use
+	// this so comment it out.
+	//
+	// Order struct {
+	// 	By        string `json:"by"`
+	// 	Direction string `json:"direction"`
+	// } `json:"order"`
+	//
+	// Order []struct {
+	// 	By        string `json:"by"`
+	// 	Direction string `json:"direction"`
+	// } `json:"order"`
 }

 // Parent defined the ID of the parent directory
```
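Commenting the field out sidesteps the decode error, since JSON fields that map to nothing are simply ignored. If the field were ever needed again, one hedged alternative is a custom unmarshaller that tolerates both the array and single-object shapes reported in issue 8776:

```go
package example

import "encoding/json"

// sortOrder is the element shape Box documents for "order".
type sortOrder struct {
	By        string `json:"by"`
	Direction string `json:"direction"`
}

// orderList accepts either a JSON array of orders or a single object,
// normalising both to a slice.
type orderList []sortOrder

func (o *orderList) UnmarshalJSON(data []byte) error {
	var many []sortOrder
	if err := json.Unmarshal(data, &many); err == nil {
		*o = many
		return nil
	}
	var one sortOrder
	if err := json.Unmarshal(data, &one); err != nil {
		return err
	}
	*o = orderList{one}
	return nil
}
```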
backend/cache/cache.go (vendored, 2 lines changed)

```diff
@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 		start, end int64
 	}
 	parseChunks := func(ranges string) (crs []chunkRange, err error) {
-		for _, part := range strings.Split(ranges, ",") {
+		for part := range strings.SplitSeq(ranges, ",") {
 			var start, end int64 = 0, math.MaxInt64
 			switch ints := strings.Split(part, ":"); len(ints) {
 			case 1:
```
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
var mu sync.Mutex
|
||||
for _, upstream := range opt.Upstreams {
|
||||
upstream := upstream
|
||||
g.Go(func() (err error) {
|
||||
equal := strings.IndexRune(upstream, '=')
|
||||
if equal < 0 {
|
||||
@@ -241,18 +240,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove := true
|
||||
canMove, slowHash := true, false
|
||||
for _, u := range f.upstreams {
|
||||
features = features.Mask(ctx, u.f) // Mask all upstream fs
|
||||
if !operations.CanServerSideMove(u.f) {
|
||||
canMove = false
|
||||
}
|
||||
slowHash = slowHash || u.f.Features().SlowHash
|
||||
}
|
||||
// We can move if all remotes support Move or Copy
|
||||
if canMove {
|
||||
features.Move = f.Move
|
||||
}
|
||||
|
||||
// If any of upstreams are SlowHash, propagate it
|
||||
features.SlowHash = slowHash
|
||||
|
||||
// Enable ListR when upstreams either support ListR or is local
|
||||
// But not when all upstreams are local
|
||||
if features.ListR == nil {
|
||||
@@ -366,7 +369,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
g.Go(func() (err error) {
|
||||
return fn(gCtx, u)
|
||||
})
|
||||
@@ -633,7 +635,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
var uChans []chan time.Duration
|
||||
|
||||
for _, u := range f.upstreams {
|
||||
u := u
|
||||
if do := u.f.Features().ChangeNotify; do != nil {
|
||||
ch := make(chan time.Duration)
|
||||
uChans = append(uChans, ch)
|
||||
|
||||
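The deleted `upstream := upstream` and `u := u` lines are leftovers from pre-Go 1.22 closure semantics. Since Go 1.22 each loop iteration declares a fresh variable, so the shadowing copy is dead code. A small demonstration:

```go
package main

import "fmt"

func main() {
	funcs := make([]func(), 0, 3)
	for _, v := range []int{1, 2, 3} {
		// Pre-Go 1.22 this loop needed `v := v`, or every closure
		// would capture the same variable and print 3 three times.
		// Since Go 1.22 each iteration gets its own v.
		funcs = append(funcs, func() { fmt.Println(v) })
	}
	for _, f := range funcs {
		f() // prints 1, 2, 3
	}
}
```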
```diff
@@ -598,7 +598,7 @@ It doesn't return anything.
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "metadata":
 		return f.ShowMetadata(ctx)
@@ -625,7 +625,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 }

 // ShowMetadata returns some metadata about the corresponding DOI
-func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
+func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
 	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
 	if err != nil {
 		return nil, err
```
```diff
@@ -18,7 +18,7 @@ type headerLink struct {
 }

 func parseLinkHeader(header string) (links []headerLink) {
-	for _, link := range strings.Split(header, ",") {
+	for link := range strings.SplitSeq(header, ",") {
 		link = strings.TrimSpace(link)
 		parsed := parseLink(link)
 		if parsed != nil {
@@ -30,7 +30,7 @@ func parseLinkHeader(header string) (links []headerLink) {

 func parseLink(link string) (parsedLink *headerLink) {
 	var parts []string
-	for _, part := range strings.Split(link, ";") {
+	for part := range strings.SplitSeq(link, ";") {
 		parts = append(parts, strings.TrimSpace(part))
 	}
```
|
||||
if scopesString == "" {
|
||||
scopesString = defaultScope
|
||||
}
|
||||
for _, scope := range strings.Split(scopesString, ",") {
|
||||
for scope := range strings.SplitSeq(scopesString, ",") {
|
||||
scope = strings.TrimSpace(scope)
|
||||
scopes = append(scopes, scopePrefix+scope)
|
||||
}
|
||||
@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
|
||||
// into a list of unique extensions with leading "." and a list of associated MIME types
|
||||
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
|
||||
for _, extensionText := range extensionsIn {
|
||||
for _, extension := range strings.Split(extensionText, ",") {
|
||||
for extension := range strings.SplitSeq(extensionText, ",") {
|
||||
extension = strings.ToLower(strings.TrimSpace(extension))
|
||||
if extension == "" {
|
||||
continue
|
||||
|
||||
```diff
@@ -386,7 +386,6 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
 	g.SetLimit(o.fs.ci.Checkers)
 	var mu sync.Mutex // protect the info.Permissions from concurrent writes
 	for _, permissionID := range info.PermissionIds {
-		permissionID := permissionID
 		g.Go(func() error {
 			// must fetch the team drive ones individually to check the inherited flag
 			perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
@@ -520,7 +519,6 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
 	}
 	// merge metadata into request and user metadata
 	for k, v := range meta {
-		k, v := k, v
 		// parse a boolean from v and write into out
 		parseBool := func(out *bool) error {
 			b, err := strconv.ParseBool(v)
```
```diff
@@ -8,7 +8,7 @@ type CreateFolderResponse struct {
 	Status int    `json:"status"`
 	Msg    string `json:"msg"`
 	Result struct {
-		FldID interface{} `json:"fld_id"`
+		FldID any `json:"fld_id"`
 	} `json:"result"`
 }
```
```diff
@@ -252,6 +252,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
 		}, {
 			Value: "us-east4",
 			Help:  "Northern Virginia",
+		}, {
+			Value: "us-east5",
+			Help:  "Ohio",
 		}, {
 			Value: "us-west1",
 			Help:  "Oregon",
@@ -760,7 +763,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	// List the objects
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -768,16 +771,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return entries, err
+	return err
 }
@@ -820,14 +823,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listBuckets(ctx)
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }
@@ -1462,6 +1497,7 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
```
```diff
@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return "", err
 	}
 	bucket, bucketPath := f.split(remote)
-	return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
+	return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
 }

 // Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
 		"x-archive-auto-make-bucket": "1",
 		"x-archive-queue-derive":     "0",
 		"x-archive-keep-old-version": "0",
-		"x-amz-copy-source":          quotePath(path.Join("/", srcBucket, srcPath)),
+		"x-amz-copy-source":          rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
 		"x-amz-metadata-directive":   "COPY",
 		"x-archive-filemeta-sha1":    srcObj.sha1,
 		"x-archive-filemeta-md5":     srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	// make a GET request to (frontend)/download/:item/:path
 	opts := rest.Opts{
 		Method:  "GET",
-		Path:    path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
+		Path:    path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
 		Options: optionsFixed,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,16 +1334,6 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
 	return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
 }

-// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
-func quotePath(s string) string {
-	seg := strings.Split(s, "/")
-	newValues := []string{}
-	for _, v := range seg {
-		newValues = append(newValues, url.QueryEscape(v))
-	}
-	return strings.Join(newValues, "/")
-}
-
 var (
 	_ fs.Fs     = &Fs{}
 	_ fs.Copier = &Fs{}
```
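The removed `quotePath` used `url.QueryEscape` per segment, which encodes a space as `+` rather than `%20`; the switch to `rest.URLPathEscapeAll` presumably applies proper path escaping throughout. For reference, a standalone per-segment path escaper built on `url.PathEscape` (a sketch of the idea, not the `rest` package's actual implementation):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// escapeSegments path-escapes each /-separated segment while keeping
// the separators intact, so "a b/c#d" stays a two-segment path.
func escapeSegments(p string) string {
	segs := strings.Split(p, "/")
	for i, s := range segs {
		segs[i] = url.PathEscape(s)
	}
	return strings.Join(segs, "/")
}

func main() {
	fmt.Println(escapeSegments("item name/file #1.txt"))
	// item%20name/file%20%231.txt
}
```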
```diff
@@ -17,6 +17,7 @@ import (
 	"net/url"
 	"os"
 	"path"
+	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -59,31 +60,43 @@ const (
 	configVersion = 1

 	defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
-	defaultClientID = "jottacli"
+	defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices"

 	legacyTokenURL              = "https://api.jottacloud.com/auth/v1/token"
 	legacyRegisterURL           = "https://api.jottacloud.com/auth/v1/register"
 	legacyClientID              = "nibfk8biu12ju7hpqomr8b1e40"
 	legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
 	legacyConfigVersion         = 0
-
-	teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
-	teliaseCloudAuthURL  = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
-	teliaseCloudClientID = "desktop"
-
-	telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
-	telianoCloudAuthURL  = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
-	telianoCloudClientID = "desktop"
-
-	tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
-	tele2CloudAuthURL  = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
-	tele2CloudClientID = "desktop"
-
-	onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
-	onlimeCloudAuthURL  = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
-	onlimeCloudClientID = "desktop"
 )

+type service struct {
+	key      string
+	name     string
+	domain   string
+	realm    string
+	clientID string
+	scopes   []string
+}
+
+// The list of services and their settings for supporting traditional OAuth.
+// Please keep these in alphabetical order, but with jottacloud first.
+func getServices() []service {
+	return []service{
+		{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
+		{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
+		{"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+	}
+}
+
 // Register with Fs
 func init() {
 	// needs to be done early so we can use oauth during config
@@ -159,36 +172,44 @@ func init() {
 }

 // Config runs the backend configuration protocol
-func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
-	switch config.State {
+func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
+	switch conf.State {
 	case "":
-		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
+		if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" {
+			return nil, errors.New("not supported by this backend")
+		}
+		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
 			Value: "standard",
-			Help:  "Standard authentication.\nUse this if you're a normal Jottacloud user.",
+			Help: `Standard authentication.
+This is primarily supported by the official service, but may also be
+supported by some white-label services. It is designed for command-line
+applications, and you will be asked to enter a single-use personal login
+token which you must manually generate from the account security settings
+in the web interface of your service.`,
+		}, {
+			Value: "traditional",
+			Help: `Traditional authentication.
+This is supported by the official service and all white-label services
+that rclone knows about. You will be asked which service to connect to.
+It has a limitation of only a single active authentication at a time. You
+need to be on, or have access to, a machine with an internet-connected
+web browser.`,
 		}, {
 			Value: "legacy",
-			Help:  "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
-		}, {
-			Value: "telia_se",
-			Help:  "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
-		}, {
-			Value: "telia_no",
-			Help:  "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
-		}, {
-			Value: "tele2",
-			Help:  "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
-		}, {
-			Value: "onlime",
-			Help:  "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
+			Help: `Legacy authentication.
+This is no longer supported by any known services and its use is not
+recommended. You will be asked for your account's username and password.`,
 		}})
 	case "auth_type_done":
 		// Jump to next state according to config chosen
-		return fs.ConfigGoto(config.Result)
+		return fs.ConfigGoto(conf.Result)
 	case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
 		m.Set("configVersion", fmt.Sprint(configVersion))
-		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
+		return fs.ConfigInput("standard_token", "config_login_token", `Personal login token.
+Generate it from the account security settings in the web interface of your
+service, for the official service on https://www.jottacloud.com/web/secure.`)
 	case "standard_token":
-		loginToken := config.Result
+		loginToken := conf.Result
 		m.Set(configClientID, defaultClientID)
 		m.Set(configClientSecret, "")
@@ -203,10 +224,50 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
+	case "traditional":
+		services := getServices()
+		options := make([]fs.OptionExample, 0, len(services))
+		for _, service := range services {
+			options = append(options, fs.OptionExample{
+				Value: service.key,
+				Help:  service.name,
+			})
+		}
+		return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional",
+			"White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.",
+			options)
+	case "traditional_type":
+		services := getServices()
+		i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result })
+		if i == -1 {
+			return nil, fmt.Errorf("unexpected service %q", conf.Result)
+		}
+		service := services[i]
+		opts := rest.Opts{
+			Method:  "GET",
+			RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration",
+		}
+		var wellKnown api.WellKnown
+		srv := rest.NewClient(fshttp.NewClient(ctx))
+		_, err := srv.CallJSON(ctx, &opts, nil, &wellKnown)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err)
+		}
+		m.Set("configVersion", fmt.Sprint(configVersion))
+		m.Set(configClientID, service.clientID)
+		m.Set(configTokenURL, wellKnown.TokenEndpoint)
+		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+			OAuth2Config: &oauthutil.Config{
+				AuthURL:     wellKnown.AuthorizationEndpoint,
+				TokenURL:    wellKnown.TokenEndpoint,
+				ClientID:    service.clientID,
+				Scopes:      service.scopes,
+				RedirectURL: oauthutil.RedirectLocalhostURL,
+			},
+		})
 	case "legacy": // configure a jottacloud backend using legacy authentication
 		m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
 		return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?

Rclone has its own Jottacloud API KEY which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine it's recommended to create a
@@ -214,7 +275,7 @@ machine specific API key. These keys can NOT be shared between
machines.`)
 	case "legacy_api":
 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		if config.Result == "true" {
+		if conf.Result == "true" {
 			deviceRegistration, err := registerDevice(ctx, srv)
 			if err != nil {
 				return nil, fmt.Errorf("failed to register device: %w", err)
@@ -223,16 +284,16 @@ machines.`)
 			m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
 			fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
 		}
-		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
+		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.")
 	case "legacy_username":
-		m.Set(configUsername, config.Result)
-		return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
+		m.Set(configUsername, conf.Result)
+		return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. This is only used in setup, it will not be stored.")
 	case "legacy_password":
-		m.Set("password", config.Result)
+		m.Set("password", conf.Result)
 		m.Set("auth_code", "")
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_auth_code":
-		authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
+		authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
 		m.Set("auth_code", authCode)
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_do_auth":
@@ -242,12 +303,12 @@ machines.`)
 		authCode, _ := m.Get("auth_code")

 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		clientID, ok := m.Get(configClientID)
-		if !ok {
+		clientID, _ := m.Get(configClientID)
+		if clientID == "" {
 			clientID = legacyClientID
 		}
-		clientSecret, ok := m.Get(configClientSecret)
-		if !ok {
+		clientSecret, _ := m.Get(configClientSecret)
+		if clientSecret == "" {
 			clientSecret = legacyEncryptedClientSecret
 		}
@@ -260,7 +321,7 @@ machines.`)
 		}
 		token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
 		if err == errAuthCodeRequired {
-			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
+			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses two-factor authentication; you will receive a verification code via SMS.")
 		}
 		m.Set("password", "")
 		m.Set("auth_code", "")
@@ -272,58 +333,6 @@ machines.`)
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
-	case "telia_se": // telia_se cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, teliaseCloudClientID)
-		m.Set(configTokenURL, teliaseCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     teliaseCloudAuthURL,
-				TokenURL:    teliaseCloudTokenURL,
-				ClientID:    teliaseCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "telia_no": // telia_no cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, telianoCloudClientID)
-		m.Set(configTokenURL, telianoCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     telianoCloudAuthURL,
-				TokenURL:    telianoCloudTokenURL,
-				ClientID:    telianoCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "tele2": // tele2 cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, tele2CloudClientID)
-		m.Set(configTokenURL, tele2CloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     tele2CloudAuthURL,
-				TokenURL:    tele2CloudTokenURL,
-				ClientID:    tele2CloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "onlime": // onlime cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, onlimeCloudClientID)
-		m.Set(configTokenURL, onlimeCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     onlimeCloudAuthURL,
-				TokenURL:    onlimeCloudTokenURL,
-				ClientID:    onlimeCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
 	case "choose_device":
 		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -331,7 +340,7 @@ section of the official Jottacloud client. If you instead want to access the
sync or the backup section, for example, you must choose yes.`)

 	case "choose_device_query":
-		if config.Result != "true" {
+		if conf.Result != "true" {
 			m.Set(configDevice, "")
 			m.Set(configMountpoint, "")
 			return fs.ConfigGoto("end")
@@ -372,7 +381,7 @@ a new by entering a unique name.`, defaultDevice)
 			return deviceNames[i], ""
 		})
 	case "choose_device_result":
-		device := config.Result
+		device := conf.Result

 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
@@ -432,7 +441,7 @@ You may create a new by entering a unique name.`, device)
 			return dev.MountPoints[i].Name, ""
 		})
 	case "choose_device_mountpoint":
-		mountpoint := config.Result
+		mountpoint := conf.Result

 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
@@ -463,7 +472,7 @@ You may create a new by entering a unique name.`, device)

 		if isNew {
 			if device == defaultDevice {
-				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
+				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice)
 			}
 			fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
 			_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
@@ -478,7 +487,7 @@ You may create a new by entering a unique name.`, device)
 		// All the config flows end up here in case we need to carry on with something
 		return nil, nil
 	}
-	return nil, fmt.Errorf("unknown state %q", config.State)
+	return nil, fmt.Errorf("unknown state %q", conf.State)
 }

 // Options defines the configuration for this backend
@@ -929,12 +938,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 			oauthConfig.AuthURL = tokenURL
 		}
 	} else if ver == legacyConfigVersion {
-		clientID, ok := m.Get(configClientID)
-		if !ok {
+		clientID, _ := m.Get(configClientID)
+		if clientID == "" {
 			clientID = legacyClientID
 		}
-		clientSecret, ok := m.Get(configClientSecret)
-		if !ok {
+		clientSecret, _ := m.Get(configClientSecret)
+		if clientSecret == "" {
 			clientSecret = legacyEncryptedClientSecret
 		}
 		oauthConfig.ClientID = clientID
@@ -1000,6 +1009,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.features.ListR = nil
 	}

+	cust, err := getCustomerInfo(ctx, f.apiSrv)
+	if err != nil {
+		return nil, err
+	}
+	f.user = cust.Username
+	f.setEndpoints()
+
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
 		_, err := f.readMetaDataForPath(ctx, "")
@@ -1009,13 +1025,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return err
 	})

-	cust, err := getCustomerInfo(ctx, f.apiSrv)
-	if err != nil {
-		return nil, err
-	}
-	f.user = cust.Username
-	f.setEndpoints()
-
 	if root != "" && !rootIsDir {
 		// Check to see if the root is actually an existing file
 		remote := path.Base(root)
```
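The new `traditional` flow derives its OAuth endpoints from each service's OpenID Connect discovery document instead of hard-coding them per brand, which is what lets the four removed per-service config cases collapse into one data-driven path. A plain-stdlib sketch of that discovery lookup; the JSON field names follow the OIDC spec and the URL layout mirrors the diff:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
)

// wellKnown holds the two endpoints the config flow needs from the
// provider's OpenID Connect discovery document.
type wellKnown struct {
	AuthorizationEndpoint string `json:"authorization_endpoint"`
	TokenEndpoint         string `json:"token_endpoint"`
}

// discover fetches /.well-known/openid-configuration for a realm.
func discover(ctx context.Context, domain, realm string) (*wellKnown, error) {
	u := "https://" + domain + "/auth/realms/" + realm + "/.well-known/openid-configuration"
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("discovery failed: %s", resp.Status)
	}
	var wk wellKnown
	if err := json.NewDecoder(resp.Body).Decode(&wk); err != nil {
		return nil, err
	}
	return &wk, nil
}
```

Adding a new white-label service then becomes a one-line entry in `getServices` rather than a new const block plus a new config case.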
```diff
@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	iofs "io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -841,7 +842,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	} else if !fi.IsDir() {
 		return fs.ErrorIsFile
 	}
-	return os.Remove(localPath)
+	err := os.Remove(localPath)
+	if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
+		if os.Chmod(localPath, 0o600) == nil {
+			err = os.Remove(localPath)
+		}
+	}
+	return err
 }

 // Precision of the file system
```
```diff
@@ -334,7 +334,7 @@ func TestMetadata(t *testing.T) {

 func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 	ctx := context.Background()
-	whenRFC := when.Format(time.RFC3339Nano)
+	whenRFC := when.Local().Format(time.RFC3339Nano)
 	const dayLength = len("2001-01-01")

 	f := r.Flocal.(*Fs)
```
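The `.Local()` addition appears to pin the expected string to the zone in which the local backend reports mtimes: formatting the same instant in different locations yields different strings even though the times are equal. A small illustration of that pitfall (the rationale here is inferred from the test change):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	when := time.Date(2001, 1, 1, 12, 0, 0, 0, time.UTC)
	// Same instant, different renderings; string comparison only
	// works if both sides are normalised to the same location.
	fmt.Println(when.Format(time.RFC3339Nano))         // 2001-01-01T12:00:00Z
	fmt.Println(when.Local().Format(time.RFC3339Nano)) // local offset, e.g. +01:00
}
```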
backend/local/local_internal_windows_test.go (new file, 40 lines)

```diff
@@ -0,0 +1,40 @@
+//go:build windows
+
+package local
+
+import (
+	"context"
+	"path/filepath"
+	"runtime"
+	"syscall"
+	"testing"
+
+	"github.com/rclone/rclone/fs/operations"
+	"github.com/rclone/rclone/fstest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
+// Microsoft docs indicate that "This attribute is not honored on directories."
+// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
+// and https://github.com/golang/go/issues/26295
+func TestRmdirWindows(t *testing.T) {
+	if runtime.GOOS != "windows" {
+		t.Skipf("windows only")
+	}
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+
+	err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
+	require.NoError(t, err)
+
+	ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
+	require.NoError(t, err)
+
+	err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
+	require.NoError(t, err)
+
+	err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
+	assert.NoError(t, err)
+}
```
```diff
@@ -400,7 +400,7 @@ type quirks struct {
 }

 func (q *quirks) parseQuirks(option string) {
-	for _, flag := range strings.Split(option, ",") {
+	for flag := range strings.SplitSeq(option, ",") {
 		switch strings.ToLower(strings.TrimSpace(flag)) {
 		case "binlist":
 			// The official client sometimes uses a so called "bin" protocol,
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	f.speedupAny = false
 	uniqueValidPatterns := make(map[string]any)

-	for _, pattern := range strings.Split(patternString, ",") {
+	for pattern := range strings.SplitSeq(patternString, ",") {
 		pattern = strings.ToLower(strings.TrimSpace(pattern))
 		if pattern == "" {
 			continue
```
@@ -325,13 +325,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 }

 // listDir lists the bucket to the entries
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	// List the objects and directories
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
-		entries = append(entries, entry)
-		return nil
+		return callback(entry)
 	})
-	return entries, err
+	return err
 }

 // listBuckets lists the buckets to entries

@@ -354,15 +353,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listBuckets(ctx)
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting

@@ -629,6 +659,7 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
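This List/ListP refactor repeats across several backends further down (oracleobjectstorage, swift). A sketch of the shape of the pattern, under the assumption of a hypothetical backend whose entries are already in memory — `list.WithListP`, `list.NewHelper`, `Add` and `Flush` are the rclone `fs/list` helpers used in the diff, while `f.entries` stands in for the real paginated lister:

```go
package mybackend // hypothetical backend package

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/list"
)

// Fs is a stand-in backend holding its entries in memory.
type Fs struct{ entries fs.DirEntries }

// List becomes a thin wrapper: WithListP drives ListP and gathers the
// callback tranches back into a single slice for legacy callers.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries to the callback in tranches.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback) // batches entries before each callback
	for _, entry := range f.entries {
		if err := helper.Add(entry); err != nil {
			return err // the callback asked the listing to stop
		}
	}
	return helper.Flush() // deliver any remaining buffered entries
}
```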
@@ -243,7 +243,6 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
 func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
 	numSet = 0
 	for k, v := range metadata {
-		k, v := k, v
 		switch k {
 		case "mtime":
 			t, err := time.Parse(timeFormatIn, v)

@@ -422,12 +421,7 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
 		if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
 			return true
 		}
-		for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
-			if hasUserIdentity(identity) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
 	}
 	// Put Permissions with a user first, leaving unsorted otherwise
 	slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
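The orderPermissions rewrite collapses a hand-rolled scan into `slices.ContainsFunc` (Go 1.21+). A self-contained sketch with invented types:

```go
package main

import (
	"fmt"
	"slices"
)

type identity struct{ user string } // invented for illustration

func hasUser(id identity) bool { return id.user != "" }

func main() {
	ids := []identity{{""}, {"alice"}}
	// Equivalent to: for _, id := range ids { if hasUser(id) { return true } }; return false
	fmt.Println(slices.ContainsFunc(ids, hasUser)) // true
}
```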
@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
 	require.NoError(b, err)
 	require.Equal(b, len(buf), n)
 	h := New()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
 		h.Reset()
 		h.Write(buf)
 		h.Sum(nil)
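`b.Loop()` (Go 1.24) replaces the classic `b.ResetTimer()` plus `for i := 0; i < b.N; i++` idiom: the first call resets the timer, so setup above the loop is excluded from the measurement, and the loop body is kept from being optimized away. A minimal sketch using a standard-library hash rather than quickxorhash:

```go
package bench

import (
	"crypto/sha256"
	"testing"
)

func BenchmarkSum(b *testing.B) {
	buf := make([]byte, 1<<20) // setup: not timed, since b.Loop resets the timer
	for b.Loop() {
		sha256.Sum256(buf)
	}
}
```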
@@ -254,15 +254,47 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucketName, directory := f.split(dir)
 	fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
 	if bucketName == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listBuckets(ctx)
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // listFn is called from list to handle an object.

@@ -411,24 +443,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectst
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 		if err != nil {
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	}
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return entries, nil
+	return nil
 }

 // listBuckets returns all the buckets to out

@@ -765,6 +797,7 @@ var (
 	_ fs.Copier          = &Fs{}
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.ListRer         = &Fs{}
+	_ fs.ListPer         = &Fs{}
 	_ fs.Commander       = &Fs{}
 	_ fs.CleanUpper      = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
@@ -5,6 +5,7 @@ package api

 import (
 	"fmt"
+	"net/url"
 	"reflect"
 	"strconv"
 	"time"

@@ -136,8 +137,25 @@ type Link struct {
 }

 // Valid reports whether l is non-nil, has an URL, and is not expired.
+// It primarily checks the URL's expire query parameter, falling back to the Expire field.
 func (l *Link) Valid() bool {
-	return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
+	if l == nil || l.URL == "" {
+		return false
+	}
+
+	// Primary validation: check URL's expire query parameter
+	if u, err := url.Parse(l.URL); err == nil {
+		if expireStr := u.Query().Get("expire"); expireStr != "" {
+			// Try parsing as Unix timestamp (seconds)
+			if expireInt, err := strconv.ParseInt(expireStr, 10, 64); err == nil {
+				expireTime := time.Unix(expireInt, 0)
+				return time.Now().Add(10 * time.Second).Before(expireTime)
+			}
+		}
+	}
+
+	// Fallback validation: use the Expire field if URL parsing didn't work
+	return time.Now().Add(10 * time.Second).Before(time.Time(l.Expire))
 }

 // URL is a basic form of URL
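The expire-parameter check can be seen in isolation; the sketch below mirrors Valid's primary path with an illustrative URL (the 10-second buffer matches the code above):

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
	"time"
)

func main() {
	raw := fmt.Sprintf("https://example.com/file?expire=%d",
		time.Now().Add(time.Hour).Unix())
	u, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	sec, err := strconv.ParseInt(u.Query().Get("expire"), 10, 64)
	if err != nil {
		panic(err)
	}
	// Valid applies a 10-second safety buffer before the expiry time.
	fmt.Println(time.Now().Add(10 * time.Second).Before(time.Unix(sec, 0))) // true
}
```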
backend/pikpak/api/types_test.go (new file, 99 lines)
@@ -0,0 +1,99 @@
+package api
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+// TestLinkValid tests the Link.Valid method for various scenarios
+func TestLinkValid(t *testing.T) {
+	tests := []struct {
+		name     string
+		link     *Link
+		expected bool
+		desc     string
+	}{
+		{
+			name:     "nil link",
+			link:     nil,
+			expected: false,
+			desc:     "nil link should be invalid",
+		},
+		{
+			name:     "empty URL",
+			link:     &Link{URL: ""},
+			expected: false,
+			desc:     "empty URL should be invalid",
+		},
+		{
+			name: "valid URL with future expire parameter",
+			link: &Link{
+				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
+			},
+			expected: true,
+			desc:     "URL with future expire parameter should be valid",
+		},
+		{
+			name: "expired URL with past expire parameter",
+			link: &Link{
+				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
+			},
+			expected: false,
+			desc:     "URL with past expire parameter should be invalid",
+		},
+		{
+			name: "URL expire parameter takes precedence over Expire field",
+			link: &Link{
+				URL:    fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
+				Expire: Time(time.Now().Add(-time.Hour)), // Fallback is expired
+			},
+			expected: true,
+			desc:     "URL expire parameter should take precedence over Expire field",
+		},
+		{
+			name: "URL expire parameter within 10 second buffer should be invalid",
+			link: &Link{
+				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(5*time.Second).Unix()),
+			},
+			expected: false,
+			desc:     "URL expire parameter within 10 second buffer should be invalid",
+		},
+		{
+			name: "fallback to Expire field when no URL expire parameter",
+			link: &Link{
+				URL:    "https://example.com/file",
+				Expire: Time(time.Now().Add(time.Hour)),
+			},
+			expected: true,
+			desc:     "should fallback to Expire field when URL has no expire parameter",
+		},
+		{
+			name: "fallback to Expire field when URL expire parameter is invalid",
+			link: &Link{
+				URL:    "https://example.com/file?expire=invalid",
+				Expire: Time(time.Now().Add(time.Hour)),
+			},
+			expected: true,
+			desc:     "should fallback to Expire field when URL expire parameter is unparseable",
+		},
+		{
+			name: "invalid when both URL expire and Expire field are expired",
+			link: &Link{
+				URL:    fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
+				Expire: Time(time.Now().Add(-time.Hour)),
+			},
+			expected: false,
+			desc:     "should be invalid when both URL expire and Expire field are expired",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := tt.link.Valid()
+			if result != tt.expected {
+				t.Errorf("Link.Valid() = %v, expected %v. %s", result, tt.expected, tt.desc)
+			}
+		})
+	}
+}
@@ -13,6 +13,8 @@ import (
 	protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
 	"github.com/henrybear327/go-proton-api"

+	"github.com/pquerna/otp/totp"
+
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"

@@ -87,6 +89,17 @@ The value can also be provided with --protondrive-2fa=000000

 The 2FA code of your proton drive account if the account is set up with
 two-factor authentication`,
 			Required: false,
+		}, {
+			Name: "otp_secret_key",
+			Help: `The OTP secret key
+
+The value can also be provided with --protondrive-otp-secret-key=ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
+
+The OTP secret key of your proton drive account if the account is set up with
+two-factor authentication`,
+			Required:   false,
+			Sensitive:  true,
+			IsPassword: true,
 		}, {
 			Name: clientUIDKey,
 			Help: "Client uid key (internal use only)",

@@ -191,6 +204,7 @@ type Options struct {
 	Password        string `config:"password"`
 	MailboxPassword string `config:"mailbox_password"`
 	TwoFA           string `config:"2fa"`
+	OtpSecretKey    string `config:"otp_secret_key"`

 	// advanced
 	Enc encoder.MultiEncoder `config:"encoding"`

@@ -356,7 +370,15 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
 	config.FirstLoginCredential.Username = opt.Username
 	config.FirstLoginCredential.Password = opt.Password
 	config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
+	// if 2FA code is provided, use it; otherwise, generate one using the OTP secret key if provided
 	config.FirstLoginCredential.TwoFA = opt.TwoFA
+	if opt.TwoFA == "" && opt.OtpSecretKey != "" {
+		code, err := totp.GenerateCode(opt.OtpSecretKey, time.Now())
+		if err != nil {
+			return nil, fmt.Errorf("couldn't generate 2FA code: %w", err)
+		}
+		config.FirstLoginCredential.TwoFA = code
+	}
 	protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err)

@@ -395,6 +417,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 	}

+	if opt.OtpSecretKey != "" {
+		var err error
+		opt.OtpSecretKey, err = obscure.Reveal(opt.OtpSecretKey)
+		if err != nil {
+			return nil, fmt.Errorf("couldn't decrypt OtpSecretKey: %w", err)
+		}
+	}
+
 	ci := fs.GetConfig(ctx)

 	root = strings.Trim(root, "/")
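The new flow derives a one-time code from the stored secret whenever no explicit 2FA code is configured. A minimal sketch of the `totp.GenerateCode` call from `github.com/pquerna/otp` used above; the secret is a dummy base32 value for illustration only:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pquerna/otp/totp"
)

func main() {
	const secret = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" // dummy secret
	code, err := totp.GenerateCode(secret, time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println("2FA code:", code) // six digits, valid for the current 30s window
}
```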
@@ -59,11 +59,7 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f

 	defer func() { u.fileUsage[fileID] = borrowed }()

-	effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
-
-	if neededMemory < effectiveChunkSize {
-		effectiveChunkSize = neededMemory
-	}
+	effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))

 	if effectiveChunkSize <= u.reserved {
 		return effectiveChunkSize
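The rewrite folds a compute-then-lower sequence into one clamp expression using the `min`/`max` builtins (Go 1.21+). A standalone sketch with invented values:

```go
package main

import "fmt"

func main() {
	var (
		needed   int64 = 1 << 20 // invented values for illustration
		bySpeed  int64 = 4 << 20
		reserved int64 = 2 << 20
	)
	// Old: effective := max(bySpeed, reserved); if needed < effective { effective = needed }
	// New: one expression, same result.
	effective := min(needed, max(bySpeed, reserved))
	fmt.Println(effective) // 1048576
}
```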
backend/s3/s3.go (1059 lines changed): file diff suppressed because it is too large.
@@ -200,7 +200,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 	pool := newFilePool(ctx, fs, "testshare", "/test/path")

 	const numGoroutines = 10
-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		mockFile := newMockFile()
 		pool.pool = append(pool.pool, mockFile)
 	}

@@ -208,7 +208,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 	// Test concurrent get operations
 	done := make(chan bool, numGoroutines)

-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		go func() {
 			defer func() { done <- true }()

@@ -219,7 +219,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 		}()
 	}

-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		<-done
 	}
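`for range numGoroutines` is Go 1.22's range-over-int form, which drops the unused counter from the three-clause loop. A small sketch of the same fan-out/fan-in shape:

```go
package main

import "fmt"

func main() {
	done := make(chan bool, 3)
	for range 3 { // same as: for i := 0; i < 3; i++
		go func() { done <- true }()
	}
	for range 3 {
		<-done
	}
	fmt.Println("all goroutines finished")
}
```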
@@ -192,6 +192,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}

+	// if root is empty or ends with / (must be a directory)
+	isRootDir := isPathDir(root)
+
 	root = strings.Trim(root, "/")

 	f := &Fs{

@@ -218,6 +221,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if share == "" || dir == "" {
 		return f, nil
 	}
+
+	// Skip stat check if root is already a directory
+	if isRootDir {
+		return f, nil
+	}
 	cn, err := f.getConnection(ctx, share)
 	if err != nil {
 		return nil, err

@@ -894,6 +902,11 @@ func ensureSuffix(s, suffix string) string {
 	return s + suffix
 }

+// isPathDir determines if a path represents a directory based on trailing slash
+func isPathDir(path string) bool {
+	return path == "" || strings.HasSuffix(path, "/")
+}
+
 func trimPathPrefix(s, prefix string) string {
 	// we need to clean the paths to make tests pass!
 	s = betterPathClean(s)
backend/smb/smb_internal_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
+// Unit tests for internal SMB functions
+package smb
+
+import "testing"
+
+// TestIsPathDir tests the isPathDir function logic
+func TestIsPathDir(t *testing.T) {
+	tests := []struct {
+		path     string
+		expected bool
+	}{
+		// Empty path should be considered a directory
+		{"", true},
+
+		// Paths with trailing slash should be directories
+		{"/", true},
+		{"share/", true},
+		{"share/dir/", true},
+		{"share/dir/subdir/", true},
+
+		// Paths without trailing slash should not be directories
+		{"share", false},
+		{"share/dir", false},
+		{"share/dir/file", false},
+		{"share/dir/subdir/file", false},
+
+		// Edge cases
+		{"share//", true},
+		{"share///", true},
+		{"share/dir//", true},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.path, func(t *testing.T) {
+			result := isPathDir(tt.path)
+			if result != tt.expected {
+				t.Errorf("isPathDir(%q) = %v, want %v", tt.path, result, tt.expected)
+			}
+		})
+	}
+}
@@ -561,6 +561,21 @@ func (f *Fs) setRoot(root string) {
 	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
 }

+// Fetch the base container's policy to be used if/when we need to create a
+// segments container to ensure we use the same policy.
+func (f *Fs) fetchStoragePolicy(ctx context.Context, container string) (fs.Fs, error) {
+	err := f.pacer.Call(func() (bool, error) {
+		var rxHeaders swift.Headers
+		_, rxHeaders, err := f.c.Container(ctx, container)
+
+		f.opt.StoragePolicy = rxHeaders["X-Storage-Policy"]
+		fs.Debugf(f, "Auto set StoragePolicy to %s", f.opt.StoragePolicy)
+
+		return shouldRetryHeaders(ctx, rxHeaders, err)
+	})
+	return nil, err
+}
+
 // NewFsWithConnection constructs an Fs from the path, container:path
 // and authenticated connection.
 //

@@ -590,6 +605,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
 		f.opt.UseSegmentsContainer.Valid = true
 		fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
 	}
+
 	if f.rootContainer != "" && f.rootDirectory != "" {
 		// Check to see if the object exists - ignoring directory markers
 		var info swift.Object

@@ -773,21 +789,20 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
 	if container == "" {
-		return nil, fs.ErrorListBucketRequired
+		return fs.ErrorListBucketRequired
 	}
 	// List the objects
 	err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
-		entries = append(entries, entry)
-		return nil
+		return callback(entry)
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// container must be present if listing succeeded
 	f.cache.MarkOK(container)
-	return entries, nil
+	return nil
 }

 // listContainers lists the containers

@@ -818,14 +833,46 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	container, directory := f.split(dir)
 	if container == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listContainers(ctx)
+		entries, err := f.listContainers(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting

@@ -1101,6 +1148,13 @@ func (f *Fs) newSegmentedUpload(ctx context.Context, dstContainer string, dstPat
 		container: dstContainer,
 	}
 	if f.opt.UseSegmentsContainer.Value {
+		if f.opt.StoragePolicy == "" {
+			_, err = f.fetchStoragePolicy(ctx, dstContainer)
+			if err != nil {
+				return nil, err
+			}
+		}
+
 		su.container += segmentsContainerSuffix
 		err = f.makeContainer(ctx, su.container)
 		if err != nil {

@@ -1650,6 +1704,7 @@ var (
 	_ fs.PutStreamer = &Fs{}
 	_ fs.Copier      = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
@@ -76,6 +76,7 @@ func (f *Fs) testNoChunk(t *testing.T) {

 // Additional tests that aren't in the framework
 func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("PolicyDiscovery", f.testPolicyDiscovery)
 	t.Run("NoChunk", f.testNoChunk)
 	t.Run("WithChunk", f.testWithChunk)
 	t.Run("WithChunkFail", f.testWithChunkFail)

@@ -195,4 +196,50 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
 	require.Equal(t, obj.Size(), objTarget.Size())
 }

+func (f *Fs) testPolicyDiscovery(t *testing.T) {
+	ctx := context.TODO()
+	container := "testPolicyDiscovery-1"
+	// Reset the policy so we can test if it is populated.
+	f.opt.StoragePolicy = ""
+	err := f.makeContainer(ctx, container)
+	require.NoError(t, err)
+	_, err = f.fetchStoragePolicy(ctx, container)
+	require.NoError(t, err)
+
+	// Default policy for SAIO image is 1replica.
+	assert.Equal(t, "1replica", f.opt.StoragePolicy)
+
+	// Create a container using a non-default policy, and check to ensure
+	// that the created segments container uses the same non-default policy.
+	policy := "Policy-1"
+	container = "testPolicyDiscovery-2"
+
+	f.opt.StoragePolicy = policy
+	err = f.makeContainer(ctx, container)
+	require.NoError(t, err)
+
+	// Reset the policy so we can test if it is populated, and set to the
+	// non-default policy.
+	f.opt.StoragePolicy = ""
+	_, err = f.fetchStoragePolicy(ctx, container)
+	require.NoError(t, err)
+	assert.Equal(t, policy, f.opt.StoragePolicy)
+
+	// Test that when a segmented upload container is made, the newly
+	// created container inherits the non-default policy of the base
+	// container.
+	f.opt.StoragePolicy = ""
+	f.opt.UseSegmentsContainer.Value = true
+	su, err := f.newSegmentedUpload(ctx, container, "")
+	require.NoError(t, err)
+	// The container name we expected?
+	segmentsContainer := container + segmentsContainerSuffix
+	assert.Equal(t, segmentsContainer, su.container)
+	// The policy we expected?
+	f.opt.StoragePolicy = ""
+	_, err = f.fetchStoragePolicy(ctx, su.container)
+	require.NoError(t, err)
+	assert.Equal(t, policy, f.opt.StoragePolicy)
+}
+
 var _ fstests.InternalTester = (*Fs)(nil)
@@ -21,7 +21,6 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	for _, u := range upstreams {
-		u := u // Closure
 		go func() {
 			rfs := u.RootFs
 			remote := path.Join(u.RootPath, filePath)
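The deleted `u := u` copy is dead code since Go 1.22, where each loop iteration gets a fresh variable, so goroutine closures no longer capture one shared binding. A sketch of the behavior the copy used to guard against:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, s := range []string{"a", "b", "c"} {
		wg.Add(1)
		go func() { // captures this iteration's s; no `s := s` copy needed
			defer wg.Done()
			fmt.Println(s)
		}()
	}
	wg.Wait() // prints a, b, c in some order
}
```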
@@ -123,7 +123,7 @@ func (p *Prop) Hashes() (hashes map[hash.Type]string) {
 	hashes = make(map[hash.Type]string)
 	for _, checksums := range p.Checksums {
 		checksums = strings.ToLower(checksums)
-		for _, checksum := range strings.Split(checksums, " ") {
+		for checksum := range strings.SplitSeq(checksums, " ") {
 			switch {
 			case strings.HasPrefix(checksum, "sha1:"):
 				hashes[hash.SHA1] = checksum[5:]
bin/make_bisync_docs.go (new file, 159 lines)
@@ -0,0 +1,159 @@
+//go:build ignore
+
+package main
+
+import (
+	"bytes"
+	"cmp"
+	"context"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/operations"
+	"github.com/rclone/rclone/fstest/runs"
+	"github.com/stretchr/testify/assert/yaml"
+)
+
+var path = flag.String("path", "./docs/content/", "root path")
+
+const (
+	configFile              = "fstest/test_all/config.yaml"
+	startListIgnores        = "<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
+	endListIgnores          = "<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
+	startListFailures       = "<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
+	endListFailures         = "<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
+	integrationTestsJSONURL = "https://pub.rclone.org/integration-tests/current/index.json"
+	integrationTestsHTMLURL = "https://pub.rclone.org/integration-tests/current/"
+)
+
+func main() {
+	err := replaceBetween(*path, startListIgnores, endListIgnores, getIgnores)
+	if err != nil {
+		fs.Errorf(*path, "error replacing ignores: %v", err)
+	}
+	err = replaceBetween(*path, startListFailures, endListFailures, getFailures)
+	if err != nil {
+		fs.Errorf(*path, "error replacing failures: %v", err)
+	}
+}
+
+// replaceBetween replaces the text between startSep and endSep with fn()
+func replaceBetween(path, startSep, endSep string, fn func() (string, error)) error {
+	b, err := os.ReadFile(filepath.Join(path, "bisync.md"))
+	if err != nil {
+		return err
+	}
+	doc := string(b)
+
+	before, after, found := strings.Cut(doc, startSep)
+	if !found {
+		return fmt.Errorf("could not find: %v", startSep)
+	}
+	_, after, found = strings.Cut(after, endSep)
+	if !found {
+		return fmt.Errorf("could not find: %v", endSep)
+	}
+
+	replaceSection, err := fn()
+	if err != nil {
+		return err
+	}
+
+	newDoc := before + startSep + "\n" + strings.TrimSpace(replaceSection) + "\n" + endSep + after
+
+	err = os.WriteFile(filepath.Join(path, "bisync.md"), []byte(newDoc), 0777)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// getIgnores updates the list of ignores from config.yaml
+func getIgnores() (string, error) {
+	config, err := parseConfig()
+	if err != nil {
+		return "", fmt.Errorf("failed to parse config: %v", err)
+	}
+	s := ""
+	slices.SortFunc(config.Backends, func(a, b runs.Backend) int {
+		return cmp.Compare(a.Remote, b.Remote)
+	})
+	for _, backend := range config.Backends {
+		include := false
+
+		if slices.Contains(backend.IgnoreTests, "cmd/bisync") {
+			include = true
+			s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
+		}
+
+		for _, ignore := range backend.Ignore {
+			if strings.Contains(strings.ToLower(ignore), "bisync") {
+				if !include { // don't have header row yet
+					s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
+				}
+				include = true
+				s += fmt.Sprintf(" - `%s`\n", ignore)
+				// TODO: might be neat to add a "reason" param displaying the reason the test is ignored
+			}
+		}
+	}
+	return s, nil
+}
+
+// getFailures updates the list of currently failing tests from the integration tests server
+func getFailures() (string, error) {
+	var buf bytes.Buffer
+	err := operations.CopyURLToWriter(context.Background(), integrationTestsJSONURL, &buf)
+	if err != nil {
+		return "", err
+	}
+
+	r := runs.Report{}
+	err = json.Unmarshal(buf.Bytes(), &r)
+	if err != nil {
+		return "", fmt.Errorf("failed to unmarshal json: %v", err)
+	}
+
+	s := ""
+	for _, run := range r.Failed {
+		for i, t := range run.FailedTests {
+			if strings.Contains(strings.ToLower(t), "bisync") {
+
+				if i == 0 { // don't have header row yet
+					s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(run.Remote, ":"), run.Backend)
+				}
+
+				url := integrationTestsHTMLURL + run.TrialName
+				url = url[:len(url)-5] + "1.txt" // numbers higher than 1 could change from night to night
+				s += fmt.Sprintf(" - [`%s`](%v)\n", t, url)
+
+				if i == 4 && len(run.FailedTests) > 5 { // stop after 5
+					s += fmt.Sprintf(" - [%v more](%v)\n", len(run.FailedTests)-5, integrationTestsHTMLURL)
+					break
+				}
+			}
+		}
+	}
+	s += fmt.Sprintf("- Updated: %v", r.DateTime)
+	return s, nil
+}
+
+// parseConfig reads and parses the config.yaml file
+func parseConfig() (*runs.Config, error) {
+	d, err := os.ReadFile(configFile)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read config file: %w", err)
+	}
+	config := &runs.Config{}
+	err = yaml.Unmarshal(d, &config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse config file: %w", err)
+	}
+	return config, nil
+}
bin/markdown-lint (new executable file, 17 lines)
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+#
+# Run markdown linting locally
+set -e
+
+# Workflow
+build=.github/workflows/build.yml
+
+# Globs read from $build
+globs=$(awk '/- name: Check Markdown format/{f=1;next} f && /globs:/{f=2;next} f==2 && NF{if($1=="-"){exit} print $0}' $build)
+
+if [ -z "$globs" ]; then
+	echo "Error: No globs found in Check Markdown step in $build" >&2
+	exit 1
+fi
+
+docker run -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs
@@ -33,7 +33,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 	}
 	logMap = map[string]string{}
 	logs = []string{}
-	for _, line := range bytes.Split(out, []byte{'\n'}) {
+	for line := range bytes.SplitSeq(out, []byte{'\n'}) {
 		if len(line) == 0 {
 			continue
 		}
@@ -51,47 +51,52 @@ output. The output is typically used, free, quota and trash contents.

 E.g. Typical output from ` + "`rclone about remote:`" + ` is:

-    Total:   17 GiB
-    Used:    7.444 GiB
-    Free:    1.315 GiB
-    Trashed: 100.000 MiB
-    Other:   8.241 GiB
+` + "```text" + `
+Total:   17 GiB
+Used:    7.444 GiB
+Free:    1.315 GiB
+Trashed: 100.000 MiB
+Other:   8.241 GiB
+` + "```" + `

 Where the fields are:

-* Total: Total size available.
-* Used: Total size used.
-* Free: Total space available to this user.
-* Trashed: Total space used by trash.
-* Other: Total amount in other storage (e.g. Gmail, Google Photos).
-* Objects: Total number of objects in the storage.
+- Total: Total size available.
+- Used: Total size used.
+- Free: Total space available to this user.
+- Trashed: Total space used by trash.
+- Other: Total amount in other storage (e.g. Gmail, Google Photos).
+- Objects: Total number of objects in the storage.

 All sizes are in number of bytes.

 Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.

-    Total:   18253611008
-    Used:    7993453766
-    Free:    1411001220
-    Trashed: 104857602
-    Other:   8849156022
+` + "```text" + `
+Total:   18253611008
+Used:    7993453766
+Free:    1411001220
+Trashed: 104857602
+Other:   8849156022
+` + "```" + `

 A ` + "`--json`" + ` flag generates conveniently machine-readable output, e.g.

-    {
-        "total": 18253611008,
-        "used": 7993453766,
-        "trashed": 104857602,
-        "other": 8849156022,
-        "free": 1411001220
-    }
+` + "```json" + `
+{
+    "total": 18253611008,
+    "used": 7993453766,
+    "trashed": 104857602,
+    "other": 8849156022,
+    "free": 1411001220
+}
+` + "```" + `

 Not all backends print all fields. Information is not included if it is not
 provided by a backend. Where the value is unlimited it is omitted.

 Some backends does not support the ` + "`rclone about`" + ` command at all,
-see complete list in [documentation](https://rclone.org/overview/#optional-features).
-`,
+see complete list in [documentation](https://rclone.org/overview/#optional-features).`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.41",
 		// "groups": "",
@@ -23,21 +23,23 @@ func init() {
 }

 var commandDefinition = &cobra.Command{
-	Use:   "authorize <fs name> [base64_json_blob | client_id client_secret]",
+	Use:   "authorize <backendname> [base64_json_blob | client_id client_secret]",
 	Short: `Remote authorization.`,
-	Long: `Remote authorization. Used to authorize a remote or headless
-rclone from a machine with a browser - use as instructed by
-rclone config.
+	Long: `Remote authorization. Used to authorize a remote or headless
+rclone from a machine with a browser. Use as instructed by rclone config.
+See also the [remote setup documentation](/remote_setup).

 The command requires 1-3 arguments:
-  - fs name (e.g., "drive", "s3", etc.)
-  - Either a base64 encoded JSON blob obtained from a previous rclone config session
-  - Or a client_id and client_secret pair obtained from the remote service
+
+- Name of a backend (e.g. "drive", "s3")
+- Either a base64 encoded JSON blob obtained from a previous rclone config session
+- Or a client_id and client_secret pair obtained from the remote service

 Use --auth-no-open-browser to prevent rclone to open auth
 link in default browser automatically.

-Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
+Use --template to generate HTML output via a custom Go template. If a blank
+string is provided as an argument to this flag, the default template is used.`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.27",
 	},
@@ -10,7 +10,7 @@ import (

 func TestAuthorizeCommand(t *testing.T) {
 	// Test that the Use string is correctly formatted
-	if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
+	if commandDefinition.Use != "authorize <backendname> [base64_json_blob | client_id client_secret]" {
 		t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
 	}

@@ -26,7 +26,7 @@ func TestAuthorizeCommand(t *testing.T) {
 	}

 	helpOutput := buf.String()
-	if !strings.Contains(helpOutput, "authorize <fs name>") {
+	if !strings.Contains(helpOutput, "authorize <backendname>") {
 		t.Errorf("Help output doesn't contain correct usage information")
 	}
 }
@@ -37,26 +37,33 @@ see the backend docs for definitions.

 You can discover what commands a backend implements by using

-    rclone backend help remote:
-    rclone backend help <backendname>
+` + "```sh" + `
+rclone backend help remote:
+rclone backend help <backendname>
+` + "```" + `

 You can also discover information about the backend using (see
 [operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
 for more info).

-    rclone backend features remote:
+` + "```sh" + `
+rclone backend features remote:
+` + "```" + `

 Pass options to the backend command with -o. This should be key=value or key, e.g.:

-    rclone backend stats remote:path stats -o format=json -o long
+` + "```sh" + `
+rclone backend stats remote:path stats -o format=json -o long
+` + "```" + `

 Pass arguments to the backend by placing them on the end of the line

-    rclone backend cleanup remote:path file1 file2 file3
+` + "```sh" + `
+rclone backend cleanup remote:path file1 file2 file3
+` + "```" + `

 Note to run these commands on a running backend then see
-[backend/command](/rc/#backend-command) in the rc docs.
-`,
+[backend/command](/rc/#backend-command) in the rc docs.`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.52",
 		"groups":            "Important",
@@ -4,15 +4,19 @@ package bilib
 import (
 	"bytes"
 	"log/slog"
+	"sync"

 	"github.com/rclone/rclone/fs/log"
 )

 // CaptureOutput runs a function capturing its output at log level INFO.
 func CaptureOutput(fun func()) []byte {
+	var mu sync.Mutex
 	buf := &bytes.Buffer{}
 	oldLevel := log.Handler.SetLevel(slog.LevelInfo)
 	log.Handler.SetOutput(func(level slog.Level, text string) {
+		mu.Lock()
+		defer mu.Unlock()
 		buf.WriteString(text)
 	})
 	defer func() {

@@ -20,5 +24,7 @@ func CaptureOutput(fun func()) []byte {
 		log.Handler.SetLevel(oldLevel)
 	}()
 	fun()
+	mu.Lock()
+	defer mu.Unlock()
 	return buf.Bytes()
 }
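The mutex closes a data race: the log handler callback may fire from multiple goroutines while `fun()` runs, and `bytes.Buffer` is not safe for concurrent writes, so each write and the final read now take the lock. A reduced sketch of the same discipline with an invented callback:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

func main() {
	var (
		mu  sync.Mutex
		buf bytes.Buffer
		wg  sync.WaitGroup
	)
	emit := func(text string) { // stands in for the log.Handler output callback
		mu.Lock()
		defer mu.Unlock()
		buf.WriteString(text)
	}
	for range 4 {
		wg.Add(1)
		go func() { defer wg.Done(); emit("line\n") }()
	}
	wg.Wait()
	mu.Lock()
	defer mu.Unlock()
	fmt.Print(buf.String())
}
```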
@@ -177,6 +177,7 @@ var (
 	// "src and dst identical but can't set mod time without deleting and re-uploading"
 	argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)")
 	ignoreLogs      = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings")
+	argPCount       = flag.Int("pcount", 2, "number of parallel subtests to run for TestBisyncConcurrent") // go test ./cmd/bisync -race -pcount 10
 )

 // bisyncTest keeps all test data in a single place

@@ -284,6 +285,15 @@ func TestBisyncConcurrent(t *testing.T) {
 	if !isLocal(*fstest.RemoteName) {
 		t.Skip("TestBisyncConcurrent is skipped on non-local")
 	}
+	if *argTestCase != "" && *argTestCase != "basic" {
+		t.Skip("TestBisyncConcurrent only tests 'basic'")
+	}
+	if *argPCount < 2 {
+		t.Skip("TestBisyncConcurrent is pointless with -pcount < 2")
+	}
+	if *argGolden {
+		t.Skip("skip TestBisyncConcurrent when goldenizing")
+	}
 	oldArgTestCase := argTestCase
 	*argTestCase = "basic"
 	*ignoreLogs = true // not useful to compare logs here because both runs will be logging at once

@@ -292,8 +302,9 @@ func TestBisyncConcurrent(t *testing.T) {
 		*ignoreLogs = false
 	})

-	t.Run("test1", testParallel)
-	t.Run("test2", testParallel)
+	for i := 0; i < *argPCount; i++ {
+		t.Run(fmt.Sprintf("test%v", i), testParallel)
+	}
 }

 func testParallel(t *testing.T) {

@@ -465,6 +476,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str

 	// Prepare initial content
 	b.cleanupCase(ctx)
+	ctx = accounting.WithStatsGroup(ctx, random.String(8))
 	fstest.CheckListingWithPrecision(b.t, b.fs1, []fstest.Item{}, []string{}, b.fs1.Precision()) // verify starting from empty
 	fstest.CheckListingWithPrecision(b.t, b.fs2, []fstest.Item{}, []string{}, b.fs2.Precision())
 	initFs, err := cache.Get(ctx, b.initDir)

@@ -510,7 +522,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
 	require.NoError(b.t, err)
 	b.step = 0
 	b.stopped = false
-	for _, line := range strings.Split(string(scenBuf), "\n") {
+	for line := range strings.SplitSeq(string(scenBuf), "\n") {
 		comment := strings.Index(line, "#")
 		if comment != -1 {
 			line = line[:comment]

@@ -641,12 +653,11 @@ func (b *bisyncTest) cleanupCase(ctx context.Context) {
 	_ = operations.Purge(ctx, b.fs1, "")
 	_ = operations.Purge(ctx, b.fs2, "")
 	_ = os.RemoveAll(b.workDir)
-	accounting.Stats(ctx).ResetCounters()
 }

 func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
 	var fsrc, fdst fs.Fs
-	accounting.Stats(ctx).ResetErrors()
+	ctx = accounting.WithStatsGroup(ctx, random.String(8))
 	b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line))

 	ci := fs.GetConfig(ctx)

@@ -925,7 +936,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
 // splitLine splits scenario line into tokens and performs
 // substitutions that involve whitespace or control chars.
 func splitLine(line string) (args []string) {
-	for _, s := range strings.Fields(line) {
+	for s := range strings.FieldsSeq(line) {
 		b := []byte(whitespaceReplacer.Replace(s))
 		b = regexChar.ReplaceAllFunc(b, func(b []byte) []byte {
 			c, _ := strconv.ParseUint(string(b[5:7]), 16, 8)

@@ -1007,6 +1018,7 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
 	}
 	// test if modtimes are writeable
 	testSetModtime := func(f fs.Fs) {
+		ctx := accounting.WithStatsGroup(ctx, random.String(8)) // keep stats separate
 		in := bytes.NewBufferString("modtime_write_test")
 		objinfo := object.NewStaticObjectInfo("modtime_write_test", initDate, int64(len("modtime_write_test")), true, nil, nil)
 		obj, err := f.Put(ctx, in, objinfo)

@@ -1018,6 +1030,11 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
 		if err == fs.ErrorCantSetModTime {
 			b.t.Skip("skipping test as at least one remote does not support setting modtime")
 		}
+		if err == fs.ErrorCantSetModTimeWithoutDelete { // transfers stats expected to differ on this backend
+			logReplacements = append(logReplacements, `^.*There was nothing to transfer.*$`, dropMe)
+		} else {
+			require.NoError(b.t, err)
+		}
 		if !f.Features().IsLocal {
 			time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
 		}

@@ -1496,7 +1513,7 @@ func (b *bisyncTest) compareResults() int {

 		fs.Log(nil, divider)
 		fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
-		for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
+		for line := range strings.SplitSeq(strings.TrimSpace(text), "\n") {
 			fs.Logf(nil, "| %s", strings.TrimSpace(line))
 		}
 	}

@@ -1619,6 +1636,14 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
 			`^.*not equal on recheck.*$`, dropMe,
 		)
 	}
+	if b.ignoreBlankHash || !b.fs1.Hashes().Contains(hash.MD5) || !b.fs2.Hashes().Contains(hash.MD5) {
+		// if either side lacks support for md5, need to ignore the "nothing to transfer" log,
+		// as sync may in fact need to transfer, where it would otherwise skip based on hash or just update modtime.
+		// transfer stats will also differ in fs.ErrorCantSetModTimeWithoutDelete scenario, and where --download-hash is needed.
+		logReplacements = append(logReplacements,
+			`^.*There was nothing to transfer.*$`, dropMe,
+		)
+	}
 	rep := logReplacements
 	if b.testCase == "dry_run" {
 		rep = append(rep, dryrunReplacements...)
@@ -219,8 +219,8 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
 		return nil
 	}
 	var CompareFlag CompareOpt // for exclusions
-	opts := strings.Split(b.opt.CompareFlag, ",")
-	for _, opt := range opts {
+	opts := strings.SplitSeq(b.opt.CompareFlag, ",")
+	for opt := range opts {
 		switch strings.ToLower(strings.TrimSpace(opt)) {
 		case "size":
 			b.opt.Compare.Size = true
@@ -51,14 +51,15 @@ var longHelp = shortHelp + makeHelp(`
 bidirectional cloud sync solution in rclone.
 It retains the Path1 and Path2 filesystem listings from the prior run.
 On each successive run it will:

 - list files on Path1 and Path2, and check for changes on each side.
   Changes include |New|, |Newer|, |Older|, and |Deleted| files.
 - Propagate changes on Path1 to Path2, and vice-versa.

 Bisync is considered an **advanced command**, so use with care.
 Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
-(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
-or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
+(especially the [Limitations](https://rclone.org/bisync/#limitations) section)
+before using, or data loss can result. Questions can be asked in the
+[Rclone Forum](https://forum.rclone.org/).

-See [full bisync description](https://rclone.org/bisync/) for details.
-`)
+See [full bisync description](https://rclone.org/bisync/) for details.`)
@@ -434,7 +434,6 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
 	}

 	fulllisting, err = b.loadListingNum(listingNum)
-
 	if err != nil {
 		b.critical = true
 		b.retryable = true

@@ -610,6 +609,11 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
 		}
 	}
 	if srcNewName != "" { // if it was renamed and not deleted
+		if new == nil { // should not happen. log error and debug info
+			b.handleErr(b.renames, "internal error", fmt.Errorf("missing info for %q. Please report a bug at https://github.com/rclone/rclone/issues", srcNewName), true, true)
+			fs.PrettyPrint(srcList, "srcList for debugging", fs.LogLevelNotice)
+			continue
+		}
 		srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
 		dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
 	}

@@ -703,8 +707,7 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
 	prettyprint(dstList.list, "dstList", fs.LogLevelDebug)

 	// clear stats so we only do this once
-	accounting.MaxCompletedTransfers = 0
-	accounting.Stats(ctx).PruneTransfers()
+	accounting.Stats(ctx).RemoveDoneTransfers()
 }

 if b.DebugName != "" {
@@ -245,10 +245,8 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
 		}
 	}

-	b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
-	if accounting.MaxCompletedTransfers != -1 {
-		accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
-	}
+	b.SyncCI = fs.GetConfig(ctxCopy)                       // allows us to request graceful shutdown
+	accounting.Stats(ctxCopy).SetMaxCompletedTransfers(-1) // we need a complete list in the event of graceful shutdown
 	ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
 	b.testFn()
 	err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -59,6 +61,7 @@ INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
|
||||
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -133,6 +136,7 @@ INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
|
||||
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -87,6 +89,7 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
|
||||
@@ -21,7 +21,9 @@ INFO : Using filters file {workdir/}exclude-other-filtersfile.txt
INFO : Storing filters file hash to {workdir/}exclude-other-filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -136,7 +138,9 @@ INFO : Using filters file {workdir/}include-other-filtersfile.txt
INFO : Storing filters file hash to {workdir/}include-other-filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -90,7 +92,9 @@ INFO : Copying Path2 files to Path1
INFO : Checking access health
INFO : Found 2 matching ".chk_file" files on both paths
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -102,7 +104,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -23,7 +23,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -80,7 +82,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : subdir: Making directory
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -124,6 +126,7 @@ INFO : Path2: 1 changes: [32m 0 new[0m, [33m 0 modified[0m, [31m
INFO : Applying changes
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}RCLONE_TEST[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -148,7 +151,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -188,6 +193,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}subdir[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : subdir: Removing directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -27,7 +27,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}測試Русский ěáñ/" with Path2 "{path2/}測試Русский ěáñ/"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}測試Русский ěáñ/" vs Path2 "{path2/}測試Русский ěáñ/"
INFO : [32mBisync successful[0m
@@ -84,7 +86,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -174,7 +178,9 @@ INFO : Using filters file {workdir/}測試_filtersfile.txt
INFO : Storing filters file hash to {workdir/}測試_filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -20,7 +20,9 @@ INFO : Using filters file {workdir/}filtersfile.flt
INFO : Storing filters file hash to {workdir/}filtersfile.flt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -81,7 +83,9 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Storing filters file hash to {workdir/}filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -146,7 +150,9 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Skipped storing filters file hash to {workdir/}filtersfile.txt.{hashtype} as --dry-run is set
INFO : Copying Path2 files to Path1
NOTICE: - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
NOTICE: - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : [32mBisync successful[0m

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -33,7 +35,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -84,6 +86,7 @@ INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}file4.txt[0m
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}file5.txt[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -84,6 +86,7 @@ INFO : - [36mPath1[0m [35m[31mQueue delete[0m[0m - [36m{
INFO : - [36mPath1[0m [35m[31mQueue delete[0m[0m - [36m{path1/}file4.txt[0m
INFO : - [36mPath1[0m [35m[31mQueue delete[0m[0m - [36m{path1/}file5.txt[0m
INFO : - [34mPath2[0m [35mDo queued copies to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -17,7 +17,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -115,7 +117,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -154,6 +158,7 @@ INFO : Applying changes
INFO : - [34mPath2[0m [35m[32mQueue copy to[0m Path1[0m - [36m{path1/}file2.txt[0m
INFO : - [34mPath2[0m [35m[32mQueue copy to[0m Path1[0m - [36m{path1/}subdir/file21.txt[0m
INFO : - [34mPath2[0m [35mDo queued copies to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -171,6 +176,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -39,6 +39,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -22,6 +22,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
@@ -129,6 +130,7 @@ INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : file1.txt: Path1 is smaller. Path1: 33, Path2: 42, Difference: 9
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : file1.txt: Path1 is smaller. Path1: 33, Path2: 42, Difference: 9
INFO : Resync updating listings
@@ -158,6 +160,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m
@@ -45,6 +47,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - [34mPath2[0m [35m[31mQueue delete[0m[0m - [36m{path2/}subdir/file20.txt[0m
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
INFO : There was nothing to transfer
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : [32mBisync successful[0m

@@ -43,15 +43,21 @@ var commandDefinition = &cobra.Command{

You can use it like this to output a single file

rclone cat remote:path/to/file
|||sh
rclone cat remote:path/to/file
|||

Or like this to output any file in dir or its subdirectories.

rclone cat remote:path/to/dir
|||sh
rclone cat remote:path/to/dir
|||

Or like this to output any .txt files in dir or its subdirectories.

rclone --include "*.txt" cat remote:path/to/dir
|||sh
rclone --include "*.txt" cat remote:path/to/dir
|||

Use the |--head| flag to print characters only at the start, |--tail| for
the end and |--offset| and |--count| to print a section in the middle.
@@ -62,14 +68,17 @@ Use the |--separator| flag to print a separator value between files. Be sure to
shell-escape special characters. For example, to print a newline between
files, use:

* bash:
- bash:

rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|||sh
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|||

* powershell:
- powershell:

rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
`, "|", "`"),
|||powershell
rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
|||`, "|", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.33",
        "groups":            "Filter,Listing",
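
The `|||sh` fences in the hunk above are literal source text: these help strings use `|` as a stand-in for the backtick, and `strings.ReplaceAll` swaps the real character back in at the end, so the raw Go string can itself live inside a backtick-quoted literal. A minimal, self-contained sketch of the same pattern (the text here is illustrative, not taken from rclone):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A raw string literal cannot contain backticks, so "|" stands in
	// for them and is swapped back just before the text is used.
	help := strings.ReplaceAll(`Output a single file:

|||sh
rclone cat remote:path/to/file
|||
`, "|", "`")
	fmt.Print(help) // prints a properly fenced ```sh block
}
```
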
@@ -74,8 +74,7 @@ you what happened to it. These are reminiscent of diff files.
- |! path| means there was an error reading or hashing the source or dest.

The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information.
`, "|", "`")
option for more information.`, "|", "`")

// GetCheckOpt gets the options corresponding to the check flags
func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err error) {

@@ -17,8 +17,7 @@ var commandDefinition = &cobra.Command{
    Use:   "cleanup remote:path",
    Short: `Clean up the remote if possible.`,
    Long: `Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
versions. Not supported by all remotes.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.31",
        "groups":            "Important",

17 cmd/cmd.go
@@ -23,6 +23,7 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/accounting"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/cluster"
    "github.com/rclone/rclone/fs/config/configfile"
    "github.com/rclone/rclone/fs/config/configflags"
    "github.com/rclone/rclone/fs/config/flags"
@@ -481,6 +482,22 @@ func initConfig() {
        }
    })
}

// Run as a cluster worker if configured, otherwise ignoring
// the command given on the command line
if ci.Cluster != "" {
    if ci.ClusterID == "" || ci.ClusterID == "0" {
        fs.Infof(nil, "Running in cluster mode %q as controller", ci.ClusterID)
    } else {
        fs.Infof(nil, "Running in cluster mode %q as worker with id %q", ci.ClusterID, ci.ClusterID)
        worker, err := cluster.NewWorker(ctx)
        if err != nil || worker == nil {
            fs.Fatalf(nil, "Failed to start cluster worker: %v", err)
        }
        // Do not continue with the main thread
        select {}
    }
}
}

func resolveExitCode(err error) {
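
The hunk above gates startup on two values: an empty or `"0"` cluster id means this process acts as controller, anything else starts a worker that never returns control to `main` (the `select {}`). A standalone sketch of that dispatch shape, with a stub standing in for `cluster.NewWorker` and an invented environment variable in place of rclone's config:

```go
package main

import (
	"fmt"
	"os"
)

// startWorker is a hypothetical stand-in for cluster.NewWorker: it
// returns a channel that is never closed, so the worker runs until killed.
func startWorker(id string) (chan struct{}, error) {
	done := make(chan struct{})
	fmt.Printf("worker %q started\n", id)
	return done, nil
}

func main() {
	id := os.Getenv("CLUSTER_ID") // invented stand-in for the cluster id option
	if id == "" || id == "0" {
		fmt.Println("running as controller")
		return
	}
	done, err := startWorker(id)
	if err != nil {
		fmt.Fprintln(os.Stderr, "failed to start worker:", err)
		os.Exit(1)
	}
	<-done // like `select {}` above: never hand control back to main
}
```
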
@@ -37,6 +37,7 @@ func init() {
    configCommand.AddCommand(configDisconnectCommand)
    configCommand.AddCommand(configUserInfoCommand)
    configCommand.AddCommand(configEncryptionCommand)
    configCommand.AddCommand(configStringCommand)
}

var configCommand = &cobra.Command{
@@ -44,8 +45,7 @@ var configCommand = &cobra.Command{
    Short: `Enter an interactive configuration session.`,
    Long: `Enter an interactive configuration session where you can setup new
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
password to protect your configuration.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
    },
@@ -134,9 +134,7 @@ sensitive info with XXX.

This makes the config file suitable for posting online for support.

It should be double checked before posting as the redaction may not be perfect.

`,
It should be double checked before posting as the redaction may not be perfect.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.64",
    },
@@ -178,8 +176,8 @@ var configProvidersCommand = &cobra.Command{

var updateRemoteOpt config.UpdateRemoteOpt

var configPasswordHelp = strings.ReplaceAll(`
Note that if the config process would normally ask a question the
var configPasswordHelp = strings.ReplaceAll(
    `Note that if the config process would normally ask a question the
default is taken (unless |--non-interactive| is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
@@ -205,29 +203,29 @@ it.

This will look something like (some irrelevant detail removed):

|||
|||json
{
    "State": "*oauth-islocal,teamdrive,,",
    "Option": {
        "Name": "config_is_local",
        "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
        "Default": true,
        "Examples": [
            {
                "Value": "true",
                "Help": "Yes"
            },
            {
                "Value": "false",
                "Help": "No"
            }
        ],
        "Required": false,
        "IsPassword": false,
        "Type": "bool",
        "Exclusive": true,
    },
    "Error": "",
}
|||

@@ -250,7 +248,9 @@ The keys of |Option| are used as follows:
If |Error| is set then it should be shown to the user at the same
time as the question.

rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|||sh
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|||

Note that when using |--continue| all passwords should be passed in
the clear (not obscured). Any default config values should be passed
@@ -264,8 +264,7 @@ not just the post config questions. Any parameters are used as
defaults for questions as usual.

Note that |bin/config.py| in the rclone source implements this protocol
as a readable demonstration.
`, "|", "`")
as a readable demonstration.`, "|", "`")
var configCreateCommand = &cobra.Command{
    Use:   "create name type [key value]*",
    Short: `Create a new remote with name, type and options.`,
@@ -275,13 +274,18 @@ should be passed in pairs of |key| |value| or as |key=value|.
For example, to make a swift remote of name myremote using auto config
you would do:

rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
|||sh
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
|||

So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:

rclone config create mydrive drive config_is_local=false
|||sh
rclone config create mydrive drive config_is_local=false
|||

`, "|", "`") + configPasswordHelp,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
@@ -344,13 +348,18 @@ pairs of |key| |value| or as |key=value|.
For example, to update the env_auth field of a remote of name myremote
you would do:

rclone config update myremote env_auth true
rclone config update myremote env_auth=true
|||sh
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
|||

If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:

rclone config update myremote env_auth=true config_refresh_token=false
|||sh
rclone config update myremote env_auth=true config_refresh_token=false
|||

`, "|", "`") + configPasswordHelp,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
@@ -388,12 +397,13 @@ The |password| should be passed in in clear (unobscured).

For example, to set password of a remote of name myremote you would do:

rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
|||sh
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
|||

This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`, "|", "`"),
both support obscuring passwords directly.`, "|", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
    },
@@ -441,8 +451,7 @@ var configReconnectCommand = &cobra.Command{

To disconnect the remote use "rclone config disconnect".

This normally means going through the interactive oauth flow again.
`,
This normally means going through the interactive oauth flow again.`,
    RunE: func(command *cobra.Command, args []string) error {
        ctx := context.Background()
        cmd.CheckArgs(1, 1, command, args)
@@ -461,8 +470,7 @@ var configDisconnectCommand = &cobra.Command{

This normally means revoking the oauth token.

To reconnect use "rclone config reconnect".
`,
To reconnect use "rclone config reconnect".`,
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        f := cmd.NewFsSrc(args)
@@ -490,8 +498,7 @@ var configUserInfoCommand = &cobra.Command{
    Use:   "userinfo remote:",
    Short: `Prints info about logged in user of remote.`,
    Long: `This prints the details of the person logged in to the cloud storage
system.
`,
system.`,
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        f := cmd.NewFsSrc(args)
@@ -534,8 +541,7 @@ var configEncryptionCommand = &cobra.Command{
    Use:   "encryption",
    Short: `set, remove and check the encryption for the config file`,
    Long: `This command sets, clears and checks the encryption for the config file using
the subcommands below.
`,
the subcommands below.`,
}

var configEncryptionSetCommand = &cobra.Command{
@@ -559,8 +565,7 @@ variable to distinguish which password you must supply.
Alternatively you can remove the password first (with |rclone config
encryption remove|), then set it again with this command which may be
easier if you don't mind the unencrypted config file being on the disk
briefly.
`, "|", "`"),
briefly.`, "|", "`"),
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        config.LoadedData()
@@ -580,8 +585,7 @@ If |--password-command| is in use, this will be called to supply the old config
password.

If the config was not encrypted then no error will be returned and
this command will do nothing.
`, "|", "`"),
this command will do nothing.`, "|", "`"),
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        config.LoadedData()
@@ -600,8 +604,7 @@ It will attempt to decrypt the config using the password you supply.
If decryption fails it will return a non-zero exit code if using
|--password-command|, otherwise it will prompt again for the password.

If the config file is not encrypted it will return a non zero exit code.
`, "|", "`"),
If the config file is not encrypted it will return a non zero exit code.`, "|", "`"),
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        config.LoadedData()
@@ -611,3 +614,55 @@ If the config file is not encrypted it will return a non zero exit code.
        return nil
    },
}

var configStringCommand = &cobra.Command{
    Use:   "string <remote>",
    Short: `Print connection string for a single remote.`,
    Long: strings.ReplaceAll(`Print a connection string for a single remote.

The [connection strings](/docs/#connection-strings) can be used
wherever a remote is needed and can be more convenient than using the
config file, especially if using the RC API.

Backend parameters may be provided to the command also.

Example:

|||sh
$ rclone config string s3:rclone --s3-no-check-bucket
:s3,access_key_id=XXX,no_check_bucket,provider=AWS,region=eu-west-2,secret_access_key=YYY:rclone
|||

**NB** the strings are not quoted for use in shells (eg bash,
powershell, windows cmd). Most will work if enclosed in "double
quotes", however connection strings that contain double quotes will
require further quoting which is very shell dependent.

`, "|", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.72",
    },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        remote := args[0]
        fsInfo, _, fsPath, m, err := fs.ConfigFs(remote)
        if err != nil {
            return err
        }

        // Find the overridden options and construct the string
        overridden := fsInfo.Options.NonDefault(m)
        var out strings.Builder
        out.WriteRune(':')
        out.WriteString(fsInfo.Name)
        config := overridden.Human()
        if config != "" {
            out.WriteRune(',')
            out.WriteString(config)
        }
        out.WriteRune(':')
        out.WriteString(fsPath)
        fmt.Println(out.String())
        return nil
    },
}
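
The new `RunE` above assembles a string of the shape `:type,opt=val,...:path` from the non-default options. A standalone sketch of that string shape using only the standard library, with a plain map standing in for `fsInfo.Options.NonDefault(m)` (whether `Human()` sorts its keys is an assumption made here for determinism):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// buildConnString mirrors the shape produced above: ":type,k=v,...:path".
func buildConnString(fsType, fsPath string, opts map[string]string) string {
	keys := make([]string, 0, len(opts))
	for k := range opts {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic ordering of the option pairs
	var out strings.Builder
	out.WriteByte(':')
	out.WriteString(fsType)
	for _, k := range keys {
		out.WriteByte(',')
		out.WriteString(k + "=" + opts[k])
	}
	out.WriteByte(':')
	out.WriteString(fsPath)
	return out.String()
}

func main() {
	fmt.Println(buildConnString("s3", "rclone", map[string]string{
		"provider": "AWS",
		"region":   "eu-west-2",
	}))
	// Output: :s3,provider=AWS,region=eu-west-2:rclone
}
```
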
@@ -31,18 +31,27 @@ var commandDefinition = &cobra.Command{
    Use:   "convmv dest:path --name-transform XXX",
    Short: `Convert file and directory names in place.`,
    // Warning¡ "¡" will be replaced by backticks below
    Long: strings.ReplaceAll(`
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
    Long: strings.ReplaceAll(`convmv supports advanced path name transformations for converting and renaming
files and directories by applying prefixes, suffixes, and other alterations.

`+transform.Help()+`Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
`+transform.Help()+`The regex command generally accepts Perl-style regular expressions, the exact
syntax is defined in the [Go regular expression reference](https://golang.org/pkg/regexp/syntax/).
The replacement string may contain capturing group variables, referencing
capturing groups using the syntax ¡$name¡ or ¡${name}¡, where the name can
refer to a named capturing group or it can simply be the index as a number.
To insert a literal $, use $$.

Multiple transformations can be used in sequence, applied
in the order they are specified on the command line.

The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.

## Files vs Directories
### Files vs Directories

By default ¡--name-transform¡ will only apply to file names. The means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just directories.
To choose which which part of the file path is affected some tags can be added to the ¡--name-transform¡.
By default ¡--name-transform¡ will only apply to file names. The means only the
leaf file name will be transformed. However some of the transforms would be
better applied to the whole path or just directories. To choose which which
part of the file path is affected some tags can be added to the ¡--name-transform¡.

| Tag | Effect |
|------|------|
@@ -50,42 +59,58 @@ To choose which which part of the file path is affected some tags can be added t
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
| ¡all¡ | Transform the entire path for files and directories |

This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
This is used by adding the tag into the transform name like this:
¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.

For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡.
For some conversions using all is more likely to be useful, for example
¡--name-transform all,nfc¡.

Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.
Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name.
This will cause an error.

## Ordering and Conflicts
### Ordering and Conflicts

* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed
* Each transformation will be run one path segment at a time.
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify
transformations using ¡--dry-run¡ before execution.
- Transformations will be applied in the order specified by the user.
- If the ¡file¡ tag is in use (the default) then only the leaf name of files
  will be transformed.
- If the ¡dir¡ tag is in use then directories anywhere in the path will be
  transformed
- If the ¡all¡ tag is in use then directories and files anywhere in the path
  will be transformed
- Each transformation will be run one path segment at a time.
- If a transformation adds a ¡/¡ or ends up with an empty path segment then
  that will be an error.
- It is up to the user to put the transformations in a sensible order.
- Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or
  ¡nfc¡ followed by ¡nfd¡, are possible.
- Instead of enforcing mutual exclusivity, transformations are applied in
  sequence as specified by the user, allowing for intentional use cases
  (e.g., trimming one prefix before adding another).
- Users should be aware that certain combinations may lead to unexpected
  results and should verify transformations using ¡--dry-run¡ before execution.

## Race Conditions and Non-Deterministic Behavior
### Race Conditions and Non-Deterministic Behavior

Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
Some transformations, such as ¡replace=old:new¡, may introduce conflicts where
multiple source files map to the same destination name. This can lead to race
conditions when performing concurrent transfers. It is up to the user to
anticipate these.

- If two files from the source are transformed into the same name at the
  destination, the final state may be non-deterministic.
- Running rclone check after a sync using such transformations may erroneously
  report missing or differing files due to overwritten results.

To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.

`, "¡", "`"),
- Carefully review transformations that may introduce conflicts.
- Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind
  that it won't show the effect of non-deterministic transformations).
- Avoid transformations that cause multiple distinct source files to map to the
  same destination name.
- Consider disabling concurrency with ¡--transfers=1¡ if necessary.
- Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every
  time they are used. Avoid these when using ¡bisync¡.`, "¡", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.70",
        "groups":            "Filter,Listing,Important,Copy",
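
The new help text above describes Go's `$name`/`${name}` replacement syntax for capturing groups. A quick self-contained illustration of those semantics (the file name and pattern are invented for the example):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Swap "name-version" into "version_name" using a named group and
	// a numbered group, as described in the help text above.
	re := regexp.MustCompile(`(?P<name>\w+)-(\d+)`)
	fmt.Println(re.ReplaceAllString("report-2024.txt", "${2}_${name}"))
	// Output: 2024_report.txt
}
```
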
@@ -152,7 +152,7 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
    items := []fstest.Item{}
    for _, c := range alphabet {
        var out strings.Builder
        for i := rune(0); i < 7; i++ {
        for i := range rune(7) {
            out.WriteRune(c + i)
        }
        fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
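
This hunk swaps a counted loop for Go 1.22's range-over-integer form; the two loops below are equivalent, and ranging over an integer yields `0..n-1` with the same type, so the loop variable is still a `rune`. A minimal sketch:

```go
package main

import "fmt"

func main() {
	// Pre-1.22 counted loop.
	for i := rune(0); i < 3; i++ {
		fmt.Println('a' + i) // prints the rune values 97, 98, 99
	}
	// Go 1.22+: range over an integer yields 0..n-1 of the same type.
	for i := range rune(3) {
		fmt.Println('a' + i)
	}
}
```
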
@@ -50,22 +50,30 @@ go there.

For example

rclone copy source:sourcepath dest:destpath
|||sh
rclone copy source:sourcepath dest:destpath
|||

Let's say there are two files in sourcepath

sourcepath/one.txt
sourcepath/two.txt
|||text
sourcepath/one.txt
sourcepath/two.txt
|||

This copies them to

destpath/one.txt
destpath/two.txt
|||text
destpath/one.txt
destpath/two.txt
|||

Not to

destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
|||text
destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
|||

If you are familiar with |rsync|, rclone always works as if you had
written a trailing |/| - meaning "copy the contents of this directory".
@@ -81,20 +89,22 @@ For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have changed
recently very efficiently like this:

rclone copy --max-age 24h --no-traverse /path/to/src remote:

|||sh
rclone copy --max-age 24h --no-traverse /path/to/src remote:
|||

Rclone will sync the modification times of files and directories if
the backend supports it. If metadata syncing is required then use the
|--metadata| flag.

Note that the modification time and metadata for the root directory
will **not** be synced. See https://github.com/rclone/rclone/issues/7652
will **not** be synced. See [issue #7652](https://github.com/rclone/rclone/issues/7652)
for more info.

**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.

**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything.
**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without
copying anything.

`, "|", "`") + operationsflags.Help(),
    Annotations: map[string]string{

@@ -35,26 +35,32 @@ name. If the source is a directory then it acts exactly like the

So

rclone copyto src dst
` + "```sh" + `
rclone copyto src dst
` + "```" + `

where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.
where src and dst are rclone paths, either ` + "`remote:path`" + ` or
` + "`/path/to/local`" + ` or ` + "`C:\\windows\\path\\if\\on\\windows`" + `.

This will:

if src is file
    copy it to dst, overwriting an existing file if it exists
if src is directory
    copy it to dst, overwriting existing files if they exist
    see copy command for full details
` + "```text" + `
if src is file
    copy it to dst, overwriting an existing file if it exists
if src is directory
    copy it to dst, overwriting existing files if they exist
    see copy command for full details
` + "```" + `

This doesn't transfer files that are identical on src and dst, testing
by size and modification time or MD5SUM. It doesn't delete files from
the destination.

*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
*If you are looking to copy just a byte range of a file, please see
` + "`rclone cat --offset X --count Y`" + `.*

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view
real-time transfer statistics.

` + operationsflags.Help(),
    Annotations: map[string]string{

@@ -3,6 +3,7 @@ package copyurl

import (
    "context"
    "encoding/csv"
    "errors"
    "fmt"
    "os"
@@ -12,7 +13,9 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/lib/errcount"
    "github.com/spf13/cobra"
    "golang.org/x/sync/errgroup"
)

var (
@@ -21,6 +24,7 @@ var (
    printFilename  = false
    stdout         = false
    noClobber      = false
    urls           = false
)

func init() {
@@ -31,6 +35,7 @@ func init() {
    flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename", "")
    flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name", "")
    flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file", "")
    flags.BoolVarP(cmdFlags, &urls, "urls", "", stdout, "Use a CSV file of links to process multiple URLs", "")
}

var commandDefinition = &cobra.Command{
@@ -48,12 +53,23 @@ set in HTTP headers, it will be used instead of the name from the URL.
With |--print-filename| in addition, the resulting file name will be
printed.

Setting |--no-clobber| will prevent overwriting file on the
destination if there is one with the same name.

Setting |--stdout| or making the output file name |-|
will cause the output to be written to standard output.

Setting |--urls| allows you to input a CSV file of URLs in format: URL,
FILENAME. If |--urls| is in use then replace the URL in the arguments with the
file containing the URLs, e.g.:
|||sh
rclone copyurl --urls myurls.csv remote:dir
|||
Missing filenames will be autogenerated equivalent to using |--auto-filename|.
Note that |--stdout| and |--print-filename| are incompatible with |--urls|.
This will do |--transfers| copies in parallel. Note that if |--auto-filename|
is desired for all URLs then a file with only URLs and no filename can be used.

### Troubleshooting

If you can't get |rclone copyurl| to work then here are some things you can try:
@@ -62,9 +78,7 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
- |--bind 0.0.0.0| rclone will use IPv6 if available - try disabling it
- |--bind ::0| to disable IPv4
- |--user agent curl| - some sites have whitelists for curl's user-agent - try that
- Make sure the site works with |curl| directly

`, "|", "`"),
- Make sure the site works with |curl| directly`, "|", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.43",
        "groups":            "Important",
@@ -72,32 +86,93 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
    RunE: func(command *cobra.Command, args []string) (err error) {
        cmd.CheckArgs(1, 2, command, args)

        var dstFileName string
        var fsdst fs.Fs
        if !stdout {
            if len(args) < 2 {
                return errors.New("need 2 arguments if not using --stdout")
            }
            if args[1] == "-" {
                stdout = true
            } else if autoFilename {
                fsdst = cmd.NewFsDir(args[1:])
            } else {
                fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
            }
        }
        cmd.Run(true, true, command, func() error {
            var dst fs.Object
            if stdout {
                err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
            } else {
                dst, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber)
                if printFilename && err == nil && dst != nil {
                    fmt.Println(dst.Remote())
                }
            if !urls {
                return run(args)
            }
            return err
            return runURLS(args)
        })
        return nil
    },
}

var copyURL = operations.CopyURL // for testing

// runURLS processes a .csv file of urls and filenames
func runURLS(args []string) (err error) {
    if stdout {
        return errors.New("can't use --stdout with --urls")
    }
    if printFilename {
        return errors.New("can't use --print-filename with --urls")
    }
    dstFs := cmd.NewFsDir(args[1:])

    f, err := os.Open(args[0])
    if err != nil {
        return fmt.Errorf("failed to open .csv file: %w", err)
    }
    defer fs.CheckClose(f, &err)
    reader := csv.NewReader(f)
    reader.FieldsPerRecord = -1
    urlList, err := reader.ReadAll()
    if err != nil {
        return fmt.Errorf("failed reading .csv file: %w", err)
    }

    ec := errcount.New()
    g, gCtx := errgroup.WithContext(context.Background())
    ci := fs.GetConfig(gCtx)
    g.SetLimit(ci.Transfers)

    for _, urlEntry := range urlList {
        if len(urlEntry) == 0 {
            continue
        }
        g.Go(func() error {
            url := urlEntry[0]
            var filename string
            if len(urlEntry) > 1 {
                filename = urlEntry[1]
            }
            _, err := copyURL(gCtx, dstFs, filename, url, filename == "", headerFilename, noClobber)
            if err != nil {
                fs.Errorf(filename, "failed to copy URL %q: %v", url, err)
                ec.Add(err)
            }
            return nil
        })
    }
    ec.Add(g.Wait())
    return ec.Err("not all URLs copied successfully")
}

// run runs the command for a single URL
func run(args []string) error {
    var err error
    var dstFileName string
    var fsdst fs.Fs
    if !stdout {
        if len(args) < 2 {
            return errors.New("need 2 arguments if not using --stdout")
        }
        if args[1] == "-" {
            stdout = true
        } else if autoFilename {
            fsdst = cmd.NewFsDir(args[1:])
        } else {
            fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
        }
    }

    var dst fs.Object
    if stdout {
        err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
    } else {
        dst, err = copyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber)
        if printFilename && err == nil && dst != nil {
            fmt.Println(dst.Remote())
        }
    }
    return err
}
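
`runURLS` above bounds its concurrency with errgroup's `SetLimit`, submitting one goroutine per CSV row while never having more than `--transfers` in flight. A self-contained sketch of that pattern (the URLs and the limit are invented; per-iteration loop variables are relied on as in Go 1.22+):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	urls := []string{"https://example.com/a", "https://example.com/b", "https://example.com/c"}

	var g errgroup.Group
	g.SetLimit(2) // at most 2 goroutines in flight, like --transfers above

	for _, u := range urls {
		u := u // copy for the closure (not needed on Go 1.22+)
		g.Go(func() error {
			// A real worker would fetch u; we just record it.
			fmt.Println("processing", u)
			return nil // a non-nil error here is surfaced by Wait
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("failed:", err)
	}
}
```
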
157
cmd/copyurl/copyurl_test.go
Normal file
157
cmd/copyurl/copyurl_test.go
Normal file
@@ -0,0 +1,157 @@
|
||||
package copyurl

import (
	"context"
	"errors"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func resetGlobals() {
	autoFilename = false
	headerFilename = false
	printFilename = false
	stdout = false
	noClobber = false
	urls = false
	copyURL = operations.CopyURL
}

func TestRun_RequiresTwoArgsWhenNotStdout(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	err := run([]string{"https://example.com/foo"})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "need 2 arguments if not using --stdout")
}

func TestRun_CallsCopyURL_WithExplicitFilename_Success(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	tmp := t.TempDir()
	dstPath := filepath.Join(tmp, "out.txt")

	var called int32

	copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
		atomic.AddInt32(&called, 1)
		assert.Equal(t, "https://example.com/file", url)
		assert.Equal(t, "out.txt", dstFileName)
		assert.False(t, auto)
		assert.False(t, header)
		assert.False(t, noclobber)
		return nil, nil
	}

	err := run([]string{"https://example.com/file", dstPath})
	require.NoError(t, err)
	assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}

func TestRun_CallsCopyURL_WithAutoFilename_AndPropagatesError(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	tmp := t.TempDir()
	autoFilename = true

	want := errors.New("boom")
	var called int32

	copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
		atomic.AddInt32(&called, 1)
		assert.Equal(t, "", dstFileName) // auto filename -> empty
		assert.True(t, auto)
		return nil, want
	}

	err := run([]string{"https://example.com/auto/name", tmp})
	require.Error(t, err)
	assert.Equal(t, want, err)
	assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}

func TestRunURLS_ErrorsWithStdoutAndWithPrintFilename(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	stdout = true
	err := runURLS([]string{"dummy.csv", "destDir"})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "can't use --stdout with --urls")

	resetGlobals()
	printFilename = true
	err = runURLS([]string{"dummy.csv", "destDir"})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "can't use --print-filename with --urls")
}

func TestRunURLS_ProcessesCSV_ParallelCalls_AndAggregatesError(t *testing.T) {
	t.Cleanup(resetGlobals)
	resetGlobals()

	tmp := t.TempDir()
	csvPath := filepath.Join(tmp, "urls.csv")
	csvContent := []byte(
		"https://example.com/a,aaa.txt\n" + // success
			"https://example.com/b\n" + // auto filename
			"https://example.com/c,ccc.txt\n") // error
	require.NoError(t, os.WriteFile(csvPath, csvContent, 0o600))

	// destination dir (local backend)
	dest := t.TempDir()

	// mock copyURL: succeed for /a and /b, fail for /c

	var calls int32
	var mu sync.Mutex
	var seen []string

	copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
		atomic.AddInt32(&calls, 1)
		mu.Lock()
		seen = append(seen, url+"|"+dstFileName)
		mu.Unlock()

		switch {
		case url == "https://example.com/a":
			require.Equal(t, "aaa.txt", dstFileName)
			return nil, nil
		case url == "https://example.com/b":
			require.Equal(t, "", dstFileName) // auto-name path
			return nil, nil
		case url == "https://example.com/c":
			return nil, errors.New("network down")
		default:
			return nil, nil
		}
	}

	err := runURLS([]string{csvPath, dest})
	require.Error(t, err)
	assert.Contains(t, err.Error(), "not all URLs copied successfully")
	// 3 lines => 3 calls
	assert.Equal(t, int32(3), atomic.LoadInt32(&calls))

	// sanity: all expected URLs were seen
	assert.ElementsMatch(t,
		[]string{
			"https://example.com/a|aaa.txt",
			"https://example.com/b|",
			"https://example.com/c|ccc.txt",
		},
		seen,
	)
}
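
The tests above work because `copyURL` is a package-level variable (`var copyURL = operations.CopyURL // for testing`) rather than a direct call, giving a seam that each test can stub out and `resetGlobals` can restore. A minimal standalone illustration of that seam; the names (`fetch`, `describe`) are invented for the example:

```go
package main

import (
	"errors"
	"fmt"
)

// fetch is a package-level variable rather than a plain function, so tests
// can substitute a stub -- mirroring `var copyURL = operations.CopyURL` above.
var fetch = func(url string) (string, error) {
	// A real implementation would perform network I/O here.
	return "body of " + url, nil
}

// describe is the code under test: it reaches fetch through the variable.
func describe(url string) (string, error) {
	body, err := fetch(url)
	if err != nil {
		return "", fmt.Errorf("describe %s: %w", url, err)
	}
	return fmt.Sprintf("%s (%d bytes)", url, len(body)), nil
}

func main() {
	out, _ := describe("https://example.com")
	fmt.Println(out)

	// What a test does: swap the seam, exercise the code, then restore it,
	// just as resetGlobals restores operations.CopyURL after each test.
	orig := fetch
	fetch = func(url string) (string, error) { return "", errors.New("boom") }
	if _, err := describe("https://example.com"); err != nil {
		fmt.Println("stubbed error:", err)
	}
	fetch = orig
}
```
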
@@ -37,14 +37,18 @@ checksum of the file it has just encrypted.

Use it like this

    rclone cryptcheck /path/to/files encryptedremote:path
` + "```sh" + `
rclone cryptcheck /path/to/files encryptedremote:path
` + "```" + `

You can use it like this also, but that will involve downloading all
the files in remote:path.
the files in ` + "`remote:path`" + `.

    rclone cryptcheck remote:path encryptedremote:path
` + "```sh" + `
rclone cryptcheck remote:path encryptedremote:path
` + "```" + `

After it has run it will log the status of the encryptedremote:.
After it has run it will log the status of the ` + "`encryptedremote:`" + `.
` + check.FlagsHelp,
	Annotations: map[string]string{
		"versionIntroduced": "v1.36",

@@ -33,13 +33,13 @@ If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file name

use it like this

    rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
` + "```sh" + `
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
rclone cryptdecode --reverse encryptedremote: filename1 filename2
` + "```" + `

    rclone cryptdecode --reverse encryptedremote: filename1 filename2

Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command.
See the documentation on the [crypt](/crypt/) overlay for more info.
`,
Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + `
command. See the documentation on the [crypt](/crypt/) overlay for more info.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.38",
	},

@@ -47,15 +47,15 @@ directories have been merged.

Next, if deduping by name, for every group of duplicate file names /
hashes, it will delete all but one identical file it finds without
confirmation. This means that for most duplicated files the ` +
"`dedupe`" + ` command will not be interactive.
confirmation. This means that for most duplicated files the
` + "`dedupe`" + ` command will not be interactive.

` + "`dedupe`" + ` considers files to be identical if they have the
same file path and the same hash. If the backend does not support hashes (e.g. crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This
can be useful on crypt backends which do not support hashes.
same file path and the same hash. If the backend does not support
hashes (e.g. crypt wrapping Google Drive) then they will never be found
to be identical. If you use the ` + "`--size-only`" + ` flag then files
will be considered identical if they have the same size (any hash will be
ignored). This can be useful on crypt backends which do not support hashes.

Next rclone will resolve the remaining duplicates. Exactly which
action is taken depends on the dedupe mode. By default, rclone will

@@ -68,71 +68,82 @@ Here is an example run.

Before - with duplicates

    $ rclone lsl drive:dupes
    6048320 2016-03-05 16:23:16.798000000 one.txt
    6048320 2016-03-05 16:23:11.775000000 one.txt
    564374 2016-03-05 16:23:06.731000000 one.txt
    6048320 2016-03-05 16:18:26.092000000 one.txt
    6048320 2016-03-05 16:22:46.185000000 two.txt
    1744073 2016-03-05 16:22:38.104000000 two.txt
    564374 2016-03-05 16:22:52.118000000 two.txt
` + "```sh" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
` + "```" + `

Now the ` + "`dedupe`" + ` session

    $ rclone dedupe drive:dupes
    2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
    one.txt: Found 4 files with duplicate names
    one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
    one.txt: 2 duplicates remain
    1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
    2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    s/k/r> k
    Enter the number of the file to keep> 1
    one.txt: Deleted 1 extra copies
    two.txt: Found 3 files with duplicate names
    two.txt: 3 duplicates remain
    1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
    2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
    3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
    s) Skip and do nothing
    k) Keep just one (choose which in next step)
    r) Rename all to be different (by changing file.jpg to file-1.jpg)
    s/k/r> r
    two-1.txt: renamed from: two.txt
    two-2.txt: renamed from: two.txt
    two-3.txt: renamed from: two.txt
` + "```sh" + `
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
` + "```" + `

The result being

    $ rclone lsl drive:dupes
    6048320 2016-03-05 16:23:16.798000000 one.txt
    564374 2016-03-05 16:22:52.118000000 two-1.txt
    6048320 2016-03-05 16:22:46.185000000 two-2.txt
    1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```sh" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```" + `

Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value
Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag
or by using an extra parameter with the same value

* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
* ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
* ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
* ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.
- ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
- ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
- ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
- ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
- ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
- ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
- ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
- ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
- ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.

For example, to rename all the identically named photos in your Google Photos directory, do
For example, to rename all the identically named photos in your Google Photos
directory, do

    rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```sh" + `
rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```" + `

Or

    rclone dedupe rename "drive:Google Photos"
`,
` + "```sh" + `
rclone dedupe rename "drive:Google Photos"
` + "```",
	Annotations: map[string]string{
		"versionIntroduced": "v1.27",
		"groups": "Important",

@@ -32,26 +32,29 @@ obeys include/exclude filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use
the [purge](/commands/rclone_purge/) command.

If you supply the |--rmdirs| flag, it will remove all empty directories along with it.
You can also use the separate command [rmdir](/commands/rclone_rmdir/) or
[rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.
If you supply the |--rmdirs| flag, it will remove all empty directories along
with it. You can also use the separate command [rmdir](/commands/rclone_rmdir/)
or [rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.

For example, to delete all files bigger than 100 MiB, you may first want to
check what would be deleted (use either):

    rclone --min-size 100M lsl remote:path
    rclone --dry-run --min-size 100M delete remote:path
|||sh
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
|||

Then proceed with the actual delete:

    rclone --min-size 100M delete remote:path
|||sh
rclone --min-size 100M delete remote:path
|||

That reads "delete everything with a minimum size of 100 MiB", hence
delete all files bigger than 100 MiB.

**Important**: Since this can cause data loss, test first with the
|--dry-run| or the |--interactive|/|-i| flag.
`, "|", "`"),
|--dry-run| or the |--interactive|/|-i| flag.`, "|", "`"),
	Annotations: map[string]string{
		"versionIntroduced": "v1.27",
		"groups": "Important,Filter,Listing",

@@ -19,9 +19,8 @@ var commandDefinition = &cobra.Command{
	Use: "deletefile remote:path",
	Short: `Remove a single file from remote.`,
	Long: `Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
it will always be removed.
`,
remove a directory and it doesn't obey include/exclude filters - if the
specified file exists, it will always be removed.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.42",
		"groups": "Important",

@@ -14,8 +14,7 @@ var completionDefinition = &cobra.Command{
	Use: "completion [shell]",
	Short: `Output completion script for a given shell.`,
	Long: `Generates a shell completion script for rclone.
Run with ` + "`--help`" + ` to list the supported shells.
`,
Run with ` + "`--help`" + ` to list the supported shells.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.33",
	},

@@ -18,17 +18,21 @@ var bashCommandDefinition = &cobra.Command{
	Short: `Output bash completion script for rclone.`,
	Long: `Generates a bash shell autocompletion script for rclone.

By default, when run without any arguments,
By default, when run without any arguments,

    rclone completion bash
` + "```sh" + `
rclone completion bash
` + "```" + `

the generated script will be written to

    /etc/bash_completion.d/rclone
` + "```sh" + `
/etc/bash_completion.d/rclone
` + "```" + `

and so rclone will probably need to be run as root, or with sudo.

If you supply a path to a file as the command line argument, then
If you supply a path to a file as the command line argument, then
the generated script will be written to that file, in which case
you should not need root privileges.

@@ -39,11 +43,12 @@ can logout and login again to use the autocompletion script.

Alternatively, you can source the script directly

    . /path/to/my_bash_completion_scripts/rclone
` + "```sh" + `
. /path/to/my_bash_completion_scripts/rclone
` + "```" + `

and the autocompletion functionality will be added to your
current shell.
`,
current shell.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1, command, args)
		out := "/etc/bash_completion.d/rclone"

@@ -21,18 +21,21 @@ var fishCommandDefinition = &cobra.Command{
This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, e.g.

    sudo rclone completion fish
` + "```sh" + `
sudo rclone completion fish
` + "```" + `

Logout and login again to use the autocompletion scripts, or source
them directly

    . /etc/fish/completions/rclone.fish
` + "```sh" + `
. /etc/fish/completions/rclone.fish
` + "```" + `

If you supply a command line argument the script will be written
there.

If output_file is "-", then the output will be written to stdout.
`,
If output_file is "-", then the output will be written to stdout.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1, command, args)
		out := "/etc/fish/completions/rclone.fish"

@@ -20,13 +20,14 @@ var powershellCommandDefinition = &cobra.Command{

To load completions in your current shell session:

    rclone completion powershell | Out-String | Invoke-Expression
` + "```sh" + `
rclone completion powershell | Out-String | Invoke-Expression
` + "```" + `

To load completions for every new session, add the output of the above command
to your powershell profile.

If output_file is "-" or missing, then the output will be written to stdout.
`,
If output_file is "-" or missing, then the output will be written to stdout.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1, command, args)
		if len(args) == 0 || (len(args) > 0 && args[0] == "-") {

@@ -21,18 +21,21 @@ var zshCommandDefinition = &cobra.Command{
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
probably need to be run with sudo or as root, e.g.

    sudo rclone completion zsh
` + "```sh" + `
sudo rclone completion zsh
` + "```" + `

Logout and login again to use the autocompletion scripts, or source
them directly

    autoload -U compinit && compinit
` + "```sh" + `
autoload -U compinit && compinit
` + "```" + `

If you supply a command line argument the script will be written
there.

If output_file is "-", then the output will be written to stdout.
`,
If output_file is "-", then the output will be written to stdout.`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1, command, args)
		out := "/usr/share/zsh/vendor-completions/_rclone"

Some files were not shown because too many files have changed in this diff.