Mirror of https://github.com/rclone/rclone.git (synced 2026-01-22 20:33:17 +00:00)

Compare commits: build...fix-rc-dis (106 commits)
Commits (SHA1 only; author and date columns were not preserved):

ef6b133710, 5d6d79e7d4, 11de074cbf, e9ab177a32, f3f4fba98d, 03fccdd67b,
231083647e, 0e203a7546, a7dd787569, 689555033e, 4fc4898287, b003169088,
babd112665, 71b9b4ad7a, 4368863fcb, 04d49bf0ea, d7aa37d263, 379dffa61c,
5fd4ece31f, fc3f95190b, d6f5652b65, b5cbb7520d, a170dfa55b, 1449c5b5ba,
35fe609722, cce399515f, 8c5af2f51c, c639d3656e, d9fbbba5c3, fd87560388,
d87720a787, d541caa52b, fd1665ae93, 457d80e8a9, c5a3e86df8, 4026e8db20,
c9ce686231, b085598cbc, bb47dccdeb, 7a279d2789, 9bd5df658a, d512e4d566,
3dd68c824a, fbe73c993b, d915f75edf, 26b629f42f, ceaac2194c, 1f14b6aa35,
dd75af6a18, 99e8a63df2, 0019e18ac3, 218c3bf6e9, 8f9702583d, e6578fb5a1,
fa1d7da272, 813708c24d, fee4716343, 6e9a675b3f, 7f5a444350, d2916ac5c7,
3369a15285, 58aee30de7, ef919241a6, d5386bb9a7, bf46ea5611, b8a379c9c9,
8c37a9c2ef, 963a72ce01, a4962e21d1, 9e200531b1, 04683f2032, b41f7994da,
13a5ffe391, 85deea82e4, 89a8ea7a91, c8912eb6a0, 01674949a1, 98e1d3ee73,
50d7a80331, bc3e8e1abd, 30e80d0716, f288920696, fa2bbd705c, 43a794860f,
adfe6b3bad, 091ccb649c, 2e02d49578, 514535ad46, b010591c96, 1aaee9edce,
3f0e9f5fca, cfd0d28742, e7a2b322ec, d3a0805a2b, d4edf8ac18, 87d14b000a,
12bded980b, 6e0e76af9d, 6f9b2f7b9b, f61d79396d, 9b22e38450, 9e4fe18830,
ae5cc1ab37, d4be38ec02, 115cff3007, 70b862f026
.github/workflows/build.yml (vendored): 19 lines changed

@@ -100,7 +100,7 @@ jobs:
         fetch-depth: 0

     - name: Install Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: ${{ matrix.go }}
         check-latest: true
@@ -222,9 +222,9 @@ jobs:

     - name: Install Go
       id: setup-go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
-        go-version: '1.24'
+        go-version: '>=1.24.0-rc.1'
         check-latest: true
         cache: false

@@ -239,13 +239,13 @@ jobs:
           restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

     - name: Code quality test (Linux)
-      uses: golangci/golangci-lint-action@v6
+      uses: golangci/golangci-lint-action@v8
       with:
         version: latest
         skip-cache: true

     - name: Code quality test (Windows)
-      uses: golangci/golangci-lint-action@v6
+      uses: golangci/golangci-lint-action@v8
       env:
         GOOS: "windows"
       with:
@@ -253,7 +253,7 @@ jobs:
         skip-cache: true

     - name: Code quality test (macOS)
-      uses: golangci/golangci-lint-action@v6
+      uses: golangci/golangci-lint-action@v8
       env:
         GOOS: "darwin"
       with:
@@ -261,7 +261,7 @@ jobs:
         skip-cache: true

     - name: Code quality test (FreeBSD)
-      uses: golangci/golangci-lint-action@v6
+      uses: golangci/golangci-lint-action@v8
       env:
         GOOS: "freebsd"
       with:
@@ -269,7 +269,7 @@ jobs:
         skip-cache: true

     - name: Code quality test (OpenBSD)
-      uses: golangci/golangci-lint-action@v6
+      uses: golangci/golangci-lint-action@v8
       env:
         GOOS: "openbsd"
       with:
@@ -290,6 +290,7 @@ jobs:
           MAINTAINERS.md
           README.md
           RELEASE.md
+          CODE_OF_CONDUCT.md
           docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md

     - name: Scan edits of autogenerated files
@@ -310,7 +311,7 @@ jobs:

       # Upgrade together with NDK version
     - name: Set up Go
-      uses: actions/setup-go@v5
+      uses: actions/setup-go@v6
       with:
         go-version: '>=1.25.0-rc.1'

[another workflow file; name not captured]

@@ -92,7 +92,7 @@ jobs:
         # There's no way around this, because "ImageOS" is only available to
         # processes, but the setup-go action uses it in its key.
         id: imageos
-        uses: actions/github-script@v7
+        uses: actions/github-script@v8
         with:
           result-encoding: string
           script: |
.golangci.yml: 256 lines changed

@@ -1,144 +1,146 @@
 # golangci-lint configuration options
+version: "2"

 linters:
+  # Configure the linter set. To avoid unexpected results the implicit default
+  # set is ignored and all the ones to use are explicitly enabled.
+  default: none
   enable:
+    # Default
     - errcheck
-    - goimports
-    - revive
-    - ineffassign
     - govet
-    - unconvert
+    - ineffassign
     - staticcheck
-    - gosimple
-    - stylecheck
     - unused
-    - misspell
+    # Additional
     - gocritic
-    #- prealloc
-    #- maligned
-  disable-all: true
+    - misspell
+    #- prealloc # TODO
+    - revive
+    - unconvert
+  # Configure checks. Mostly using defaults but with some commented exceptions.
+  settings:
+    staticcheck:
+      # With staticcheck there is only one setting, so to extend the implicit
+      # default value it must be explicitly included.
+      checks:
+        # Default
+        - all
+        - -ST1000
+        - -ST1003
+        - -ST1016
+        - -ST1020
+        - -ST1021
+        - -ST1022
+        # Disable quickfix checks
+        - -QF*
+    gocritic:
+      # With gocritic there are different settings, but since enabled-checks
+      # and disabled-checks cannot both be set, for full customization the
+      # alternative is to disable all defaults and explicitly enable the ones
+      # to use.
+      disable-all: true
+      enabled-checks:
+        #- appendAssign # Skip default
+        - argOrder
+        - assignOp
+        - badCall
+        - badCond
+        #- captLocal # Skip default
+        - caseOrder
+        - codegenComment
+        #- commentFormatting # Skip default
+        - defaultCaseOrder
+        - deprecatedComment
+        - dupArg
+        - dupBranchBody
+        - dupCase
+        - dupSubExpr
+        - elseif
+        #- exitAfterDefer # Skip default
+        - flagDeref
+        - flagName
+        #- ifElseChain # Skip default
+        - mapKey
+        - newDeref
+        - offBy1
+        - regexpMust
+        - ruleguard # Enable additional check that are not enabled by default
+        #- singleCaseSwitch # Skip default
+        - sloppyLen
+        - sloppyTypeAssert
+        - switchTrue
+        - typeSwitchVar
+        - underef
+        - unlambda
+        - unslice
+        - valSwap
+        - wrapperFunc
+      settings:
+        ruleguard:
+          rules: ${base-path}/bin/rules.go
+    revive:
+      # With revive there is in reality only one setting, and when at least one
+      # rule are specified then only these rules will be considered, defaults
+      # and all others are then implicitly disabled, so must explicitly enable
+      # all rules to be used.
+      rules:
+        - name: blank-imports
+          disabled: false
+        - name: context-as-argument
+          disabled: false
+        - name: context-keys-type
+          disabled: false
+        - name: dot-imports
+          disabled: false
+        #- name: empty-block # Skip default
+        #  disabled: true
+        - name: error-naming
+          disabled: false
+        - name: error-return
+          disabled: false
+        - name: error-strings
+          disabled: false
+        - name: errorf
+          disabled: false
+        - name: exported
+          disabled: false
+        #- name: increment-decrement # Skip default
+        #  disabled: true
+        - name: indent-error-flow
+          disabled: false
+        - name: package-comments
+          disabled: false
+        - name: range
+          disabled: false
+        - name: receiver-naming
+          disabled: false
+        #- name: redefines-builtin-id # Skip default
+        #  disabled: true
+        #- name: superfluous-else # Skip default
+        #  disabled: true
+        - name: time-naming
+          disabled: false
+        - name: unexported-return
+          disabled: false
+        #- name: unreachable-code # Skip default
+        #  disabled: true
+        #- name: unused-parameter # Skip default
+        #  disabled: true
+        - name: var-declaration
+          disabled: false
+        - name: var-naming
+          disabled: false

+formatters:
+  enable:
+    - goimports
+
-issues:
-  # Enable some lints excluded by default
-  exclude-use-default: false
-
-  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
-  max-issues-per-linter: 0
-
-  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
-  max-same-issues: 0
-
-  exclude-rules:
-
-    - linters:
-        - staticcheck
-      text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
-
-  # don't disable the revive messages about comments on exported functions
-  include:
-    - EXC0012
-    - EXC0013
-    - EXC0014
-    - EXC0015
-
 run:
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  # Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
   timeout: 10m
-
-linters-settings:
-  revive:
-    # setting rules seems to disable all the rules, so re-enable them here
-    rules:
-      - name: blank-imports
-        disabled: false
-      - name: context-as-argument
-        disabled: false
-      - name: context-keys-type
-        disabled: false
-      - name: dot-imports
-        disabled: false
-      - name: empty-block
-        disabled: true
-      - name: error-naming
-        disabled: false
-      - name: error-return
-        disabled: false
-      - name: error-strings
-        disabled: false
-      - name: errorf
-        disabled: false
-      - name: exported
-        disabled: false
-      - name: increment-decrement
-        disabled: true
-      - name: indent-error-flow
-        disabled: false
-      - name: package-comments
-        disabled: false
-      - name: range
-        disabled: false
-      - name: receiver-naming
-        disabled: false
-      - name: redefines-builtin-id
-        disabled: true
-      - name: superfluous-else
-        disabled: true
-      - name: time-naming
-        disabled: false
-      - name: unexported-return
-        disabled: false
-      - name: unreachable-code
-        disabled: true
-      - name: unused-parameter
-        disabled: true
-      - name: var-declaration
-        disabled: false
-      - name: var-naming
-        disabled: false
-  stylecheck:
-    # Only enable the checks performed by the staticcheck stand-alone tool,
-    # as documented here: https://staticcheck.io/docs/configuration/options/#checks
-    checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
-  gocritic:
-    # Enable all default checks with some exceptions and some additions (commented).
-    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
-    disable-all: true
-    enabled-checks:
-      #- appendAssign # Enabled by default
-      - argOrder
-      - assignOp
-      - badCall
-      - badCond
-      #- captLocal # Enabled by default
-      - caseOrder
-      - codegenComment
-      #- commentFormatting # Enabled by default
-      - defaultCaseOrder
-      - deprecatedComment
-      - dupArg
-      - dupBranchBody
-      - dupCase
-      - dupSubExpr
-      - elseif
-      #- exitAfterDefer # Enabled by default
-      - flagDeref
-      - flagName
-      #- ifElseChain # Enabled by default
-      - mapKey
-      - newDeref
-      - offBy1
-      - regexpMust
-      - ruleguard # Not enabled by default
-      #- singleCaseSwitch # Enabled by default
-      - sloppyLen
-      - sloppyTypeAssert
-      - switchTrue
-      - typeSwitchVar
-      - underef
-      - unlambda
-      - unslice
-      - valSwap
-      - wrapperFunc
-    settings:
-      ruleguard:
-        rules: "${configDir}/bin/rules.go"
CODE_OF_CONDUCT.md (new file): 80 lines

# Rclone Code of Conduct

Like the technical community as a whole, the Rclone team and community
is made up of a mixture of professionals and volunteers from all over
the world, working on every aspect of the mission - including
mentorship, teaching, and connecting people.

Diversity is one of our huge strengths, but it can also lead to
communication issues and unhappiness. To that end, we have a few
ground rules that we ask people to adhere to. This code applies
equally to founders, mentors and those seeking help and guidance.

This isn't an exhaustive list of things that you can't do. Rather,
take it in the spirit in which it's intended - a guide to make it
easier to enrich all of us and the technical communities in which we
participate.

This code of conduct applies to all spaces managed by the Rclone
project or Rclone Services Ltd. This includes the issue tracker, the
forum, the GitHub site, the wiki, any other online services or
in-person events. In addition, violations of this code outside these
spaces may affect a person's ability to participate within them.

- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and
  supports people of all backgrounds and identities. This includes,
  but is not limited to members of any race, ethnicity, culture,
  national origin, colour, immigration status, social and economic
  class, educational level, sex, sexual orientation, gender identity
  and expression, age, size, family status, political belief,
  religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you
  in turn will depend on the work of others. Any decision you take
  will affect users and colleagues, and you should take those
  consequences into account when making decisions. Remember that we're
  a world-wide community, so you might not be communicating in someone
  else's primary language.
- **Be respectful.** Not all of us will agree all the time, but
  disagreement is no excuse for poor behavior and poor manners. We
  might all experience some frustration now and then, but we cannot
  allow that frustration to turn into a personal attack. It's
  important to remember that a community where people feel
  uncomfortable or threatened is not a productive one. Members of the
  Rclone community should be respectful when dealing with other
  members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of
  professionals, and we conduct ourselves professionally. Be kind to
  others. Do not insult or put down other participants. Harassment and
  other exclusionary behavior aren't acceptable. This includes, but is
  not limited to:
  - Violent threats or language directed against another person.
  - Discriminatory jokes and language.
  - Posting sexually explicit or violent material.
  - Posting (or threatening to post) other people's personally
    identifying information ("doxing").
  - Personal insults, especially those using racist or sexist terms.
  - Unwelcome sexual attention.
  - Advocating for, or encouraging, any of the above behavior.
  - Repeated harassment of others. In general, if someone asks you to
    stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both
  social and technical, happen all the time and Rclone is no
  exception. It is important that we resolve disagreements and
  differing views constructively. Remember that we're different. The
  strength of Rclone comes from its varied community, people from a
  wide range of backgrounds. Different people have different
  perspectives on issues. Being unable to understand why someone holds
  a viewpoint doesn't mean that they're wrong. Don't forget that it is
  human to err and blaming each other doesn't get us anywhere.
  Instead, focus on helping to resolve issues and learning from
  mistakes.

If you believe someone is violating the code of conduct, we ask that
you report it by emailing [info@rclone.com](mailto:info@rclone.com).

Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).

## Questions?

If you have questions, please feel free to [contact us](mailto:info@rclone.com).
MANUAL.html (generated): 46422 lines changed (diff suppressed because it is too large)

MANUAL.txt (generated): 5935 lines changed (diff suppressed because it is too large)
Makefile: 3 lines changed

@@ -100,6 +100,7 @@ compiletest:
 check: rclone
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
 	@golangci-lint run $(LINTTAGS) ./...
+	@bin/markdown-lint
 	@echo "-- END CODE QUALITY REPORT ---------------------------------"

 # Get the build dependencies
@@ -144,9 +145,11 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

 commanddocs: rclone
+	go generate ./lib/transform
 	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
+	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
 	go run bin/make_bisync_docs.go ./docs/content/

 backenddocs: rclone bin/make_backend_docs.py
 	-@rmdir -p '$$HOME/.config/rclone'
[README.md]

@@ -59,6 +59,7 @@ directories to and from different cloud storage providers.
 - Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 - Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 - IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
 - IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 - Koofr [:page_facing_up:](https://rclone.org/koofr/)
 - Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
@@ -105,6 +106,7 @@ directories to and from different cloud storage providers.
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
+- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
 - StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 - Storj [:page_facing_up:](https://rclone.org/storj/)
 - SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
[backend/azureblob/azureblob.go]

@@ -1338,9 +1338,9 @@ func (f *Fs) containerOK(container string) bool {
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
 	if !f.containerOK(containerName) {
-		return nil, fs.ErrorDirNotFound
+		return fs.ErrorDirNotFound
 	}
 	err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1348,16 +1348,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// container must be present if listing succeeded
 	f.cache.MarkOK(containerName)
-	return entries, nil
+	return nil
 }

 // listContainers returns all the containers to out
@@ -1393,14 +1393,47 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	container, directory := f.split(dir)
 	if container == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listContainers(ctx)
+		entries, err := f.listContainers(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting
@@ -2119,7 +2152,6 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
 	}
 	metadata = make(map[string]*string, len(o.meta))
 	for k, v := range o.meta {
-		v := v
 		metadata[k] = &v
 	}
 	return metadata
@@ -2765,8 +2797,6 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
 		blockList  blockblob.GetBlockListResponse
 		properties *blob.GetPropertiesResponse
 		options    *blockblob.CommitBlockListOptions
-		// Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
-		pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
 	)

 	properties, err = o.readMetaDataAlways(ctx)
@@ -2778,7 +2808,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

 	if objectExists {
 		// Get the committed block list
-		err = pacer.Call(func() (bool, error) {
+		err = o.fs.pacer.Call(func() (bool, error) {
 			blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
 			return o.fs.shouldRetry(ctx, err)
 		})
@@ -2820,7 +2850,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

 	// Commit only the committed blocks
 	fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
-	err = pacer.Call(func() (bool, error) {
+	err = o.fs.pacer.Call(func() (bool, error) {
 		_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
 		return o.fs.shouldRetry(ctx, err)
 	})
@@ -3156,6 +3186,7 @@ var (
 	_ fs.PutStreamer    = &Fs{}
 	_ fs.Purger         = &Fs{}
 	_ fs.ListRer        = &Fs{}
+	_ fs.ListPer        = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
 	_ fs.Object         = &Object{}
 	_ fs.MimeTyper      = &Object{}
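This List to ListP conversion repeats below for the b2, googlecloudstorage, and oracleobjectstorage sections. A minimal sketch of the pattern, using only the calls visible in these diffs (myFs and myEnumerate are hypothetical stand-ins for a backend and its directory walker):

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/list"
)

type myFs struct{ fs.Fs }

// myEnumerate stands in for the backend-specific directory walk,
// invoking cb once per entry found.
func (f *myFs) myEnumerate(ctx context.Context, dir string, cb func(fs.DirEntry) error) error {
	return nil
}

// List keeps the old slice-returning interface by delegating to ListP
// through the generic adapter.
func (f *myFs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries to the callback in tranches instead of
// accumulating one big slice, which bounds memory on huge listings.
func (f *myFs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback)
	if err := f.myEnumerate(ctx, dir, helper.Add); err != nil {
		return err
	}
	return helper.Flush() // send any remaining buffered entries
}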
[backend/b2/b2.go]

@@ -847,7 +847,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	last := ""
 	err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
@@ -855,16 +855,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return entries, nil
+	return nil
 }

 // listBuckets returns all the buckets to out
@@ -890,14 +890,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listBuckets(ctx)
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting
@@ -2192,13 +2224,17 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo
 		return info, nil, err
 	}

+	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
+	if err != nil {
+		return info, nil, err
+	}
+
 	info = fs.ChunkWriterInfo{
-		ChunkSize:   int64(f.opt.ChunkSize),
+		ChunkSize:   up.chunkSize,
 		Concurrency: o.fs.opt.UploadConcurrency,
 		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
 	}
-	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
-	return info, up, err
+	return info, up, nil
 }

 // Remove an object
@@ -2428,6 +2464,7 @@ var (
 	_ fs.PutStreamer  = &Fs{}
 	_ fs.CleanUpper   = &Fs{}
 	_ fs.ListRer      = &Fs{}
+	_ fs.ListPer      = &Fs{}
 	_ fs.PublicLinker = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
 	_ fs.Commander    = &Fs{}
[backend/box/api/types.go]

@@ -125,10 +125,21 @@ type FolderItems struct {
 	Offset     int     `json:"offset"`
 	Limit      int     `json:"limit"`
 	NextMarker *string `json:"next_marker,omitempty"`
-	Order      []struct {
-		By        string `json:"by"`
-		Direction string `json:"direction"`
-	} `json:"order"`
+	// There is some confusion about how this is actually
+	// returned. The []struct has worked for many years, but in
+	// https://github.com/rclone/rclone/issues/8776 box was
+	// returning it not as a list. We don't actually use this
+	// so comment it out.
+	//
+	// Order struct {
+	// 	By        string `json:"by"`
+	// 	Direction string `json:"direction"`
+	// } `json:"order"`
+	//
+	// Order []struct {
+	// 	By        string `json:"by"`
+	// 	Direction string `json:"direction"`
+	// } `json:"order"`
 }

 // Parent defined the ID of the parent directory
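The commented-out Order field sidesteps a payload that sometimes arrives as an object and sometimes as a list. When a field like that does need to be kept, one common approach is deferred decoding through json.RawMessage; a hypothetical sketch (not what the rclone fix does, which simply drops the field):

package main

import (
	"encoding/json"
	"fmt"
)

// folderItems captures the ambiguous field raw and decodes it on demand.
type folderItems struct {
	Order json.RawMessage `json:"order"`
}

type orderEntry struct {
	By        string `json:"by"`
	Direction string `json:"direction"`
}

// decodeOrder accepts either a single object or a list of objects.
func decodeOrder(raw json.RawMessage) ([]orderEntry, error) {
	if len(raw) == 0 {
		return nil, nil
	}
	var many []orderEntry
	if err := json.Unmarshal(raw, &many); err == nil {
		return many, nil
	}
	var one orderEntry
	if err := json.Unmarshal(raw, &one); err != nil {
		return nil, err
	}
	return []orderEntry{one}, nil
}

func main() {
	var fi folderItems
	_ = json.Unmarshal([]byte(`{"order":{"by":"type","direction":"ASC"}}`), &fi)
	entries, _ := decodeOrder(fi.Order)
	fmt.Println(entries) // [{type ASC}]
}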
backend/cache/cache.go (vendored): 2 lines changed

@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 		start, end int64
 	}
 	parseChunks := func(ranges string) (crs []chunkRange, err error) {
-		for _, part := range strings.Split(ranges, ",") {
+		for part := range strings.SplitSeq(ranges, ",") {
 			var start, end int64 = 0, math.MaxInt64
 			switch ints := strings.Split(part, ":"); len(ints) {
 			case 1:
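The Split to SplitSeq rewrites here and in several later sections rely on strings.SplitSeq, added in Go 1.24, which yields substrings through an iterator instead of allocating a slice; the loop therefore ranges over a single value rather than an index/value pair. A standalone illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// SplitSeq yields each field lazily; no intermediate []string is built.
	for part := range strings.SplitSeq("a, b ,c", ",") {
		fmt.Println(strings.TrimSpace(part))
	}
}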
[backend/combine/combine.go]

@@ -187,7 +187,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
 	g, gCtx := errgroup.WithContext(ctx)
 	var mu sync.Mutex
 	for _, upstream := range opt.Upstreams {
-		upstream := upstream
 		g.Go(func() (err error) {
 			equal := strings.IndexRune(upstream, '=')
 			if equal < 0 {
@@ -241,18 +240,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
 		DirModTimeUpdatesOnWrite: true,
 		PartialUploads:           true,
 	}).Fill(ctx, f)
-	canMove := true
+	canMove, slowHash := true, false
 	for _, u := range f.upstreams {
 		features = features.Mask(ctx, u.f) // Mask all upstream fs
 		if !operations.CanServerSideMove(u.f) {
 			canMove = false
 		}
+		slowHash = slowHash || u.f.Features().SlowHash
 	}
 	// We can move if all remotes support Move or Copy
 	if canMove {
 		features.Move = f.Move
 	}

+	// If any of upstreams are SlowHash, propagate it
+	features.SlowHash = slowHash
+
 	// Enable ListR when upstreams either support ListR or is local
 	// But not when all upstreams are local
 	if features.ListR == nil {
@@ -366,7 +369,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
 func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
 	g, gCtx := errgroup.WithContext(ctx)
 	for _, u := range f.upstreams {
-		u := u
 		g.Go(func() (err error) {
 			return fn(gCtx, u)
 		})
@@ -633,7 +635,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ...
 	var uChans []chan time.Duration

 	for _, u := range f.upstreams {
-		u := u
 		if do := u.f.Features().ChangeNotify; do != nil {
 			ch := make(chan time.Duration)
 			uChans = append(uChans, ch)
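The deleted upstream := upstream and u := u lines here (and the permissionID := permissionID and k, v := k, v lines in later sections) were the pre-Go-1.22 idiom for copying the loop variable before capturing it in a closure; since Go 1.22 each iteration binds a fresh variable, so the copy is redundant. For example:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, s := range []string{"a", "b", "c"} {
		// Since Go 1.22, s is a new variable each iteration, so the
		// explicit `s := s` shadow copy is no longer needed before
		// capturing it in the goroutine.
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(s)
		}()
	}
	wg.Wait()
}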
[backend/doi/doi.go]

@@ -598,7 +598,7 @@ It doesn't return anything.
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "metadata":
 		return f.ShowMetadata(ctx)
@@ -625,7 +625,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) ...
 }

 // ShowMetadata returns some metadata about the corresponding DOI
-func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
+func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
 	doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
 	if err != nil {
 		return nil, err
[link header parser; file name not captured]

@@ -18,7 +18,7 @@ type headerLink struct {
 }

 func parseLinkHeader(header string) (links []headerLink) {
-	for _, link := range strings.Split(header, ",") {
+	for link := range strings.SplitSeq(header, ",") {
 		link = strings.TrimSpace(link)
 		parsed := parseLink(link)
 		if parsed != nil {
@@ -30,7 +30,7 @@ func parseLinkHeader(header string) (links []headerLink) {

 func parseLink(link string) (parsedLink *headerLink) {
 	var parts []string
-	for _, part := range strings.Split(link, ";") {
+	for part := range strings.SplitSeq(link, ";") {
 		parts = append(parts, strings.TrimSpace(part))
 	}
[backend/drive]

@@ -191,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
 	if scopesString == "" {
 		scopesString = defaultScope
 	}
-	for _, scope := range strings.Split(scopesString, ",") {
+	for scope := range strings.SplitSeq(scopesString, ",") {
 		scope = strings.TrimSpace(scope)
 		scopes = append(scopes, scopePrefix+scope)
 	}
@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
 // into a list of unique extensions with leading "." and a list of associated MIME types
 func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
 	for _, extensionText := range extensionsIn {
-		for _, extension := range strings.Split(extensionText, ",") {
+		for extension := range strings.SplitSeq(extensionText, ",") {
 			extension = strings.ToLower(strings.TrimSpace(extension))
 			if extension == "" {
 				continue
@@ -386,7 +386,6 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err error) {
 	g.SetLimit(o.fs.ci.Checkers)
 	var mu sync.Mutex // protect the info.Permissions from concurrent writes
 	for _, permissionID := range info.PermissionIds {
-		permissionID := permissionID
 		g.Go(func() error {
 			// must fetch the team drive ones individually to check the inherited flag
 			perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
@@ -520,7 +519,6 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata) ...
 	}
 	// merge metadata into request and user metadata
 	for k, v := range meta {
-		k, v := k, v
 		// parse a boolean from v and write into out
 		parseBool := func(out *bool) error {
 			b, err := strconv.ParseBool(v)
||||
@@ -8,7 +8,7 @@ type CreateFolderResponse struct {
|
||||
Status int `json:"status"`
|
||||
Msg string `json:"msg"`
|
||||
Result struct {
|
||||
FldID interface{} `json:"fld_id"`
|
||||
FldID any `json:"fld_id"`
|
||||
} `json:"result"`
|
||||
}
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
// errFileNotFound represent file not found error
|
||||
var errFileNotFound error = errors.New("file not found")
|
||||
var errFileNotFound = errors.New("file not found")
|
||||
|
||||
// getFileCode retrieves the file code for a given file path
|
||||
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
|
||||
|
||||
[backend/ftp/ftp.go]

@@ -283,6 +283,7 @@ type Fs struct {
 	user     string
 	pass     string
 	dialAddr string
+	tlsConf  *tls.Config // default TLS client config
 	poolMu   sync.Mutex
 	pool     []*ftp.ServerConn
 	drain    *time.Timer // used to drain the pool when we stop using the connections
@@ -408,9 +409,14 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 func (f *Fs) tlsConfig() *tls.Config {
 	var tlsConfig *tls.Config
 	if f.opt.TLS || f.opt.ExplicitTLS {
-		tlsConfig = &tls.Config{
-			ServerName:         f.opt.Host,
-			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
+		if f.tlsConf != nil {
+			tlsConfig = f.tlsConf.Clone()
+		} else {
+			tlsConfig = new(tls.Config)
 		}
+		tlsConfig.ServerName = f.opt.Host
+		if f.opt.SkipVerifyTLSCert {
+			tlsConfig.InsecureSkipVerify = true
+		}
 		if f.opt.TLSCacheSize > 0 {
 			tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
@@ -671,6 +677,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		dialAddr: dialAddr,
 		tokens:   pacer.NewTokenDispenser(opt.Concurrency),
 		pacer:    fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		tlsConf:  fshttp.NewTransport(ctx).TLSClientConfig,
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
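The FTP change seeds the backend's TLS settings from rclone's transport-level config captured in NewFs, and clones it before modifying so per-backend mutations never leak into the shared config. A sketch of the cloning idiom (customizeTLS is a hypothetical helper):

package main

import "crypto/tls"

// customizeTLS derives a per-connection config from a shared base
// without mutating the base. baseConf may be nil.
func customizeTLS(baseConf *tls.Config, host string, insecure bool) *tls.Config {
	var conf *tls.Config
	if baseConf != nil {
		conf = baseConf.Clone() // copy, so edits below stay local
	} else {
		conf = new(tls.Config)
	}
	conf.ServerName = host
	if insecure {
		conf.InsecureSkipVerify = true
	}
	return conf
}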
[backend/googlecloudstorage]

@@ -760,7 +760,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, ...
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	// List the objects
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -768,16 +768,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return entries, err
+	return err
 }

 // listBuckets lists the buckets
@@ -820,14 +820,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listBuckets(ctx)
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting
@@ -1462,6 +1494,7 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
[backend/internetarchive]

@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, ...
 		return "", err
 	}
 	bucket, bucketPath := f.split(remote)
-	return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
+	return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
 }

 // Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Object, err error) {
 		"x-archive-auto-make-bucket": "1",
 		"x-archive-queue-derive":     "0",
 		"x-archive-keep-old-version": "0",
-		"x-amz-copy-source":          quotePath(path.Join("/", srcBucket, srcPath)),
+		"x-amz-copy-source":          rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
 		"x-amz-metadata-directive":   "COPY",
 		"x-archive-filemeta-sha1":    srcObj.sha1,
 		"x-archive-filemeta-md5":     srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	// make a GET request to (frontend)/download/:item/:path
 	opts := rest.Opts{
 		Method:  "GET",
-		Path:    path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
+		Path:    path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
 		Options: optionsFixed,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,16 +1334,6 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
 	return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
 }

-// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
-func quotePath(s string) string {
-	seg := strings.Split(s, "/")
-	newValues := []string{}
-	for _, v := range seg {
-		newValues = append(newValues, url.QueryEscape(v))
-	}
-	return strings.Join(newValues, "/")
-}
-
 var (
 	_ fs.Fs     = &Fs{}
 	_ fs.Copier = &Fs{}
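The removed quotePath helper escaped each path segment with url.QueryEscape, which applies query-string rules (a space becomes +); rest.URLPathEscapeAll, used instead per the diff, is rclone's path-style escaper. The standard-library difference the change works around:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	s := "dir name/file+1.txt"
	// Query form: space -> +, and + itself must be escaped.
	fmt.Println(url.QueryEscape(s)) // dir+name%2Ffile%2B1.txt
	// Path form: space -> %20, + passes through unchanged.
	fmt.Println(url.PathEscape(s)) // dir%20name%2Ffile+1.txt
}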
[backend/local/local.go]

@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	iofs "io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -841,7 +842,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	} else if !fi.IsDir() {
 		return fs.ErrorIsFile
 	}
-	return os.Remove(localPath)
+	err := os.Remove(localPath)
+	if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
+		if os.Chmod(localPath, 0o600) == nil {
+			err = os.Remove(localPath)
+		}
+	}
+	return err
 }

 // Precision of the file system

[backend/local/local_internal_test.go]

@@ -334,7 +334,7 @@ func TestMetadata(t *testing.T) {

 func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 	ctx := context.Background()
-	whenRFC := when.Format(time.RFC3339Nano)
+	whenRFC := when.Local().Format(time.RFC3339Nano)
 	const dayLength = len("2001-01-01")

 	f := r.Flocal.(*Fs)
backend/local/local_internal_windows_test.go (new file): 40 lines

//go:build windows

package local

import (
	"context"
	"path/filepath"
	"runtime"
	"syscall"
	"testing"

	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
// Microsoft docs indicate that "This attribute is not honored on directories."
// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
// and https://github.com/golang/go/issues/26295
func TestRmdirWindows(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.Skipf("windows only")
	}
	r := fstest.NewRun(t)
	defer r.Finalise()

	err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
	require.NoError(t, err)

	ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
	require.NoError(t, err)

	err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
	require.NoError(t, err)

	err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
	assert.NoError(t, err)
}
[backend/mailru]

@@ -400,7 +400,7 @@ type quirks struct {
 }

 func (q *quirks) parseQuirks(option string) {
-	for _, flag := range strings.Split(option, ",") {
+	for flag := range strings.SplitSeq(option, ",") {
 		switch strings.ToLower(strings.TrimSpace(flag)) {
 		case "binlist":
 			// The official client sometimes uses a so called "bin" protocol,
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	f.speedupAny = false
 	uniqueValidPatterns := make(map[string]any)

-	for _, pattern := range strings.Split(patternString, ",") {
+	for pattern := range strings.SplitSeq(patternString, ",") {
 		pattern = strings.ToLower(strings.TrimSpace(pattern))
 		if pattern == "" {
 			continue
[bucket-based backend; file name not captured]

@@ -325,13 +325,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket ...
 }

 // listDir lists the bucket to the entries
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	// List the objects and directories
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
-		entries = append(entries, entry)
-		return nil
+		return callback(entry)
 	})
-	return entries, err
+	return err
 }

 // listBuckets lists the buckets to entries
@@ -354,15 +353,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listBuckets(ctx)
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // ListR lists the objects and directories of the Fs starting
@@ -629,6 +659,7 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
[backend/onedrive (metadata)]

@@ -243,7 +243,6 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
 func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
 	numSet = 0
 	for k, v := range metadata {
-		k, v := k, v
 		switch k {
 		case "mtime":
 			t, err := time.Parse(timeFormatIn, v)
@@ -422,12 +421,7 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
 		if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
 			return true
 		}
-		for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
-			if hasUserIdentity(identity) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
 	}
 	// Put Permissions with a user first, leaving unsorted otherwise
 	slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
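slices.ContainsFunc (Go 1.21+) replaces the manual scan loop above; it reports whether any element satisfies a predicate. A standalone example:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	names := []string{"alice", "Bob", "carol"}
	// ContainsFunc stops at the first element matching the predicate,
	// replacing the for-range loop that returned true on a match.
	hasB := slices.ContainsFunc(names, func(s string) bool {
		return strings.HasPrefix(strings.ToLower(s), "b")
	})
	fmt.Println(hasB) // true
}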
[backend/onedrive/quickxorhash (benchmark)]

@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
 	require.NoError(b, err)
 	require.Equal(b, len(buf), n)
 	h := New()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		h.Reset()
 		h.Write(buf)
 		h.Sum(nil)
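testing.B.Loop, new in Go 1.24, subsumes the b.ResetTimer / b.N pattern: it manages the timer, decides the iteration count itself, and keeps the loop body from being optimized away. Roughly, as it would appear in a _test.go file (work is a hypothetical function under benchmark):

package example

import "testing"

func work() {}

// Old style: manual timer reset and explicit b.N counting.
func BenchmarkOld(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		work()
	}
}

// New style: b.Loop reports whether another iteration should run.
func BenchmarkNew(b *testing.B) {
	for b.Loop() {
		work()
	}
}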
[backend/oracleobjectstorage]

@@ -254,15 +254,47 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucketName, directory := f.split(dir)
 	fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
 	if bucketName == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
-		return f.listBuckets(ctx)
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
 	}
-	return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }

 // listFn is called from list to handle an object.
@@ -411,24 +443,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, ...
 }

 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 		if err != nil {
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	}
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return entries, nil
+	return nil
 }

 // listBuckets returns all the buckets to out
@@ -765,6 +797,7 @@ var (
 	_ fs.Copier          = &Fs{}
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.ListRer         = &Fs{}
+	_ fs.ListPer         = &Fs{}
 	_ fs.Commander       = &Fs{}
 	_ fs.CleanUpper      = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
[backend/pikpak/api/types.go]

@@ -5,6 +5,7 @@ package api

 import (
 	"fmt"
+	"net/url"
 	"reflect"
 	"strconv"
 	"time"
@@ -136,8 +137,25 @@ type Link struct {
 }

 // Valid reports whether l is non-nil, has an URL, and is not expired.
+// It primarily checks the URL's expire query parameter, falling back to the Expire field.
 func (l *Link) Valid() bool {
-	return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
+	if l == nil || l.URL == "" {
+		return false
+	}
+
+	// Primary validation: check URL's expire query parameter
+	if u, err := url.Parse(l.URL); err == nil {
+		if expireStr := u.Query().Get("expire"); expireStr != "" {
+			// Try parsing as Unix timestamp (seconds)
+			if expireInt, err := strconv.ParseInt(expireStr, 10, 64); err == nil {
+				expireTime := time.Unix(expireInt, 0)
+				return time.Now().Add(10 * time.Second).Before(expireTime)
+			}
+		}
+	}
+
+	// Fallback validation: use the Expire field if URL parsing didn't work
+	return time.Now().Add(10 * time.Second).Before(time.Time(l.Expire))
 }

 // URL is a basic form of URL
backend/pikpak/api/types_test.go (new file): 99 lines

package api

import (
	"fmt"
	"testing"
	"time"
)

// TestLinkValid tests the Link.Valid method for various scenarios
func TestLinkValid(t *testing.T) {
	tests := []struct {
		name     string
		link     *Link
		expected bool
		desc     string
	}{
		{
			name:     "nil link",
			link:     nil,
			expected: false,
			desc:     "nil link should be invalid",
		},
		{
			name:     "empty URL",
			link:     &Link{URL: ""},
			expected: false,
			desc:     "empty URL should be invalid",
		},
		{
			name: "valid URL with future expire parameter",
			link: &Link{
				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
			},
			expected: true,
			desc:     "URL with future expire parameter should be valid",
		},
		{
			name: "expired URL with past expire parameter",
			link: &Link{
				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
			},
			expected: false,
			desc:     "URL with past expire parameter should be invalid",
		},
		{
			name: "URL expire parameter takes precedence over Expire field",
			link: &Link{
				URL:    fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
				Expire: Time(time.Now().Add(-time.Hour)), // Fallback is expired
			},
			expected: true,
			desc:     "URL expire parameter should take precedence over Expire field",
		},
		{
			name: "URL expire parameter within 10 second buffer should be invalid",
			link: &Link{
				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(5*time.Second).Unix()),
			},
			expected: false,
			desc:     "URL expire parameter within 10 second buffer should be invalid",
		},
		{
			name: "fallback to Expire field when no URL expire parameter",
			link: &Link{
				URL:    "https://example.com/file",
				Expire: Time(time.Now().Add(time.Hour)),
			},
			expected: true,
			desc:     "should fallback to Expire field when URL has no expire parameter",
		},
		{
			name: "fallback to Expire field when URL expire parameter is invalid",
			link: &Link{
				URL:    "https://example.com/file?expire=invalid",
				Expire: Time(time.Now().Add(time.Hour)),
			},
			expected: true,
			desc:     "should fallback to Expire field when URL expire parameter is unparseable",
		},
		{
			name: "invalid when both URL expire and Expire field are expired",
			link: &Link{
				URL:    fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
				Expire: Time(time.Now().Add(-time.Hour)),
			},
			expected: false,
			desc:     "should be invalid when both URL expire and Expire field are expired",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.link.Valid()
			if result != tt.expected {
				t.Errorf("Link.Valid() = %v, expected %v. %s", result, tt.expected, tt.desc)
			}
		})
	}
}
@@ -59,11 +59,7 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f

    defer func() { u.fileUsage[fileID] = borrowed }()

    effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)

    if neededMemory < effectiveChunkSize {
        effectiveChunkSize = neededMemory
    }
    effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))

    if effectiveChunkSize <= u.reserved {
        return effectiveChunkSize
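
The hunk above collapses a max-then-clamp sequence into a single expression using Go's built-in min and max (Go 1.21+). A small sketch of the equivalence, with illustrative numbers:

package main

import "fmt"

func main() {
    neededMemory, reserved := int64(5), int64(8)
    speedBytes := int64(12) // stands in for int64(speed * effectiveTime.Seconds())

    // Old form: take the max, then clamp to neededMemory.
    old := max(speedBytes, reserved)
    if neededMemory < old {
        old = neededMemory
    }

    // New form: the same result in one expression.
    combined := min(neededMemory, max(speedBytes, reserved))

    fmt.Println(old == combined) // true
}
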
@@ -119,6 +119,9 @@ var providerOption = fs.Option{
    }, {
        Value: "IDrive",
        Help:  "IDrive e2",
    }, {
        Value: "Intercolo",
        Help:  "Intercolo Object Storage",
    }, {
        Value: "IONOS",
        Help:  "IONOS Cloud",
@@ -170,6 +173,9 @@ var providerOption = fs.Option{
    }, {
        Value: "Selectel",
        Help:  "Selectel Object Storage",
    }, {
        Value: "SpectraLogic",
        Help:  "Spectra Logic Black Pearl",
    }, {
        Value: "StackPath",
        Help:  "StackPath Object Storage",
@@ -504,6 +510,14 @@ func init() {
        Value: "us-east-1",
        Help:  "Indore, Madhya Pradesh, India",
    }},
}, {
    Name:     "region",
    Help:     "Region where your bucket will be created and your data stored.\n",
    Provider: "Intercolo",
    Examples: []fs.OptionExample{{
        Value: "de-fra",
        Help:  "Frankfurt, Germany",
    }},
}, {
    Name:     "region",
    Help:     "Region where your bucket will be created and your data stored.\n",
@@ -643,7 +657,7 @@ func init() {
}, {
    Name: "region",
    Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
    Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
    Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,Intercolo,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,SpectraLogic,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
    Examples: []fs.OptionExample{{
        Value: "",
        Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -954,6 +968,14 @@ func init() {
        Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
        Help:  "Singapore Single Site Private Endpoint",
    }},
}, {
    Name:     "endpoint",
    Help:     "Endpoint for Intercolo Object Storage.",
    Provider: "Intercolo",
    Examples: []fs.OptionExample{{
        Value: "de-fra.i3storage.com",
        Help:  "Frankfurt, Germany",
    }},
}, {
    Name: "endpoint",
    Help: "Endpoint for IONOS S3 Object Storage.\n\nSpecify the endpoint from the same region.",
@@ -1532,7 +1554,7 @@ func init() {
}, {
    Name: "endpoint",
    Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
    Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
    Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,Intercolo,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
    Examples: []fs.OptionExample{{
        Value: "objects-us-east-1.dream.io",
        Help:  "Dream Objects endpoint",
@@ -2067,7 +2089,7 @@ func init() {
}, {
    Name: "location_constraint",
    Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
    Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
    Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,Intercolo,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,SpectraLogic,StackPath,Storj,TencentCOS,Petabox,Mega",
}, {
    Name: "acl",
    Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -2082,7 +2104,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
    Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade,Mega",
    Provider: "!Storj,Selectel,SpectraLogic,Synology,Cloudflare,FlashBlade,Mega",
    Examples: []fs.OptionExample{{
        Value: "default",
        Help:  "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -2140,7 +2162,7 @@ isn't set then "acl" is used instead.
If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
header is added and the default (private) will be used.
`,
    Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade",
    Provider: "!Storj,Selectel,SpectraLogic,Synology,Cloudflare,FlashBlade",
    Advanced: true,
    Examples: []fs.OptionExample{{
        Value: "private",
@@ -3677,6 +3699,9 @@ func setQuirks(opt *Options) {
    case "IDrive":
        virtualHostStyle = false
        useAlreadyExists = false // untested
    case "Intercolo":
        // no quirks
        useUnsignedPayload = false // Intercolo has trailer support
    case "IONOS":
        // listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
        virtualHostStyle = false
@@ -3749,6 +3774,8 @@ func setQuirks(opt *Options) {
        urlEncodeListings = false
        useMultipartEtag = false // untested
        useAlreadyExists = false // untested
    case "SpectraLogic":
        virtualHostStyle = false // path-style required
    case "StackPath":
        listObjectsV2 = false // untested
        virtualHostStyle = false
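
Putting the new provider entries together, a plausible rclone.conf stanza for the Intercolo backend added in this compare (the provider, region, and endpoint values come from the option examples above; the key fields are placeholders):

[intercolo]
type = s3
provider = Intercolo
region = de-fra
endpoint = de-fra.i3storage.com
access_key_id = XXX
secret_access_key = YYY
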
@@ -6220,8 +6247,8 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
    metaData := make(map[string]string)
    for key, value := range resp.Header {
        key = strings.ToLower(key)
        if strings.HasPrefix(key, "x-amz-meta-") {
            metaKey := strings.TrimPrefix(key, "x-amz-meta-")
        if after, ok := strings.CutPrefix(key, "x-amz-meta-"); ok {
            metaKey := after
            metaData[metaKey] = value[0]
        }
    }
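
This hunk swaps a strings.HasPrefix/TrimPrefix pair for strings.CutPrefix (Go 1.20+), which reports the match and returns the trimmed suffix in one call. A minimal sketch:

package main

import (
    "fmt"
    "strings"
)

func main() {
    key := "x-amz-meta-mtime"
    // CutPrefix returns the remainder and whether the prefix was present,
    // replacing the HasPrefix + TrimPrefix combination in one step.
    if after, ok := strings.CutPrefix(key, "x-amz-meta-"); ok {
        fmt.Println(after) // "mtime"
    }
}
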
@@ -6641,7 +6668,7 @@ func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.R
        return wantETag, gotETag, versionID, ui, err
    }

    var s3cw *s3ChunkWriter = chunkWriter.(*s3ChunkWriter)
    s3cw := chunkWriter.(*s3ChunkWriter)
    gotETag = *stringClone(s3cw.eTag)
    versionID = stringClone(s3cw.versionID)

@@ -200,7 +200,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
    pool := newFilePool(ctx, fs, "testshare", "/test/path")

    const numGoroutines = 10
    for i := 0; i < numGoroutines; i++ {
    for range numGoroutines {
        mockFile := newMockFile()
        pool.pool = append(pool.pool, mockFile)
    }
@@ -208,7 +208,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
    // Test concurrent get operations
    done := make(chan bool, numGoroutines)

    for i := 0; i < numGoroutines; i++ {
    for range numGoroutines {
        go func() {
            defer func() { done <- true }()

@@ -219,7 +219,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
        }()
    }

    for i := 0; i < numGoroutines; i++ {
    for range numGoroutines {
        <-done
    }
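
The three loops above move to Go 1.22's range-over-integer form, which drops the unused index variable. A minimal standalone sketch:

package main

import "fmt"

func main() {
    const numGoroutines = 10
    // Since Go 1.22, ranging over an int iterates 0..n-1, so
    // "for i := 0; i < numGoroutines; i++" becomes this when i is unused.
    for range numGoroutines {
        fmt.Println("tick")
    }
}
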
@@ -192,6 +192,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        return nil, err
    }

    // if root is empty or ends with / (must be a directory)
    isRootDir := isPathDir(root)

    root = strings.Trim(root, "/")

    f := &Fs{
@@ -218,6 +221,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if share == "" || dir == "" {
        return f, nil
    }

    // Skip stat check if root is already a directory
    if isRootDir {
        return f, nil
    }
    cn, err := f.getConnection(ctx, share)
    if err != nil {
        return nil, err
@@ -894,6 +902,11 @@ func ensureSuffix(s, suffix string) string {
    return s + suffix
}

// isPathDir determines if a path represents a directory based on trailing slash
func isPathDir(path string) bool {
    return path == "" || strings.HasSuffix(path, "/")
}

func trimPathPrefix(s, prefix string) string {
    // we need to clean the paths to make tests pass!
    s = betterPathClean(s)
41  backend/smb/smb_internal_test.go  Normal file
@@ -0,0 +1,41 @@
// Unit tests for internal SMB functions
package smb

import "testing"

// TestIsPathDir tests the isPathDir function logic
func TestIsPathDir(t *testing.T) {
    tests := []struct {
        path     string
        expected bool
    }{
        // Empty path should be considered a directory
        {"", true},

        // Paths with trailing slash should be directories
        {"/", true},
        {"share/", true},
        {"share/dir/", true},
        {"share/dir/subdir/", true},

        // Paths without trailing slash should not be directories
        {"share", false},
        {"share/dir", false},
        {"share/dir/file", false},
        {"share/dir/subdir/file", false},

        // Edge cases
        {"share//", true},
        {"share///", true},
        {"share/dir//", true},
    }

    for _, tt := range tests {
        t.Run(tt.path, func(t *testing.T) {
            result := isPathDir(tt.path)
            if result != tt.expected {
                t.Errorf("isPathDir(%q) = %v, want %v", tt.path, result, tt.expected)
            }
        })
    }
}
@@ -773,21 +773,20 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
    if container == "" {
        return nil, fs.ErrorListBucketRequired
        return fs.ErrorListBucketRequired
    }
    // List the objects
    err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
        entries = append(entries, entry)
        return nil
        return callback(entry)
    })
    if err != nil {
        return nil, err
        return err
    }
    // container must be present if listing succeeded
    f.cache.MarkOK(container)
    return entries, nil
    return nil
}

// listContainers lists the containers
@@ -818,14 +817,46 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    list := list.NewHelper(callback)
    container, directory := f.split(dir)
    if container == "" {
        if directory != "" {
            return nil, fs.ErrorListBucketRequired
            return fs.ErrorListBucketRequired
        }
        entries, err := f.listContainers(ctx)
        if err != nil {
            return err
        }
        for _, entry := range entries {
            err = list.Add(entry)
            if err != nil {
                return err
            }
        }
    } else {
        err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
        if err != nil {
            return err
        }
        return f.listContainers(ctx)
    }
    return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
    return list.Flush()
}

// ListR lists the objects and directories of the Fs starting
@@ -1650,6 +1681,7 @@ var (
    _ fs.PutStreamer = &Fs{}
    _ fs.Copier      = &Fs{}
    _ fs.ListRer     = &Fs{}
    _ fs.ListPer     = &Fs{}
    _ fs.Object      = &Object{}
    _ fs.MimeTyper   = &Object{}
)
|
||||
// Package common defines code common to the union and the policies
|
||||
//
|
||||
// These need to be defined in a separate package to avoid import loops
|
||||
package common
|
||||
package common //nolint:revive // Don't include revive when running golangci-lint because this triggers var-naming: avoid meaningless package names
|
||||
|
||||
import "github.com/rclone/rclone/fs"
|
||||
|
||||
|
||||
@@ -21,7 +21,6 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
for _, u := range upstreams {
|
||||
u := u // Closure
|
||||
go func() {
|
||||
rfs := u.RootFs
|
||||
remote := path.Join(u.RootPath, filePath)
|
||||
|
||||
@@ -123,7 +123,7 @@ func (p *Prop) Hashes() (hashes map[hash.Type]string) {
    hashes = make(map[hash.Type]string)
    for _, checksums := range p.Checksums {
        checksums = strings.ToLower(checksums)
        for _, checksum := range strings.Split(checksums, " ") {
        for checksum := range strings.SplitSeq(checksums, " ") {
            switch {
            case strings.HasPrefix(checksum, "sha1:"):
                hashes[hash.SHA1] = checksum[5:]
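
This and several later hunks replace strings.Split with the iterator form strings.SplitSeq (Go 1.24), which yields substrings lazily instead of allocating an intermediate slice. A standalone sketch:

package main

import (
    "fmt"
    "strings"
)

func main() {
    checksums := "sha1:abc md5:def"
    // "for _, s := range strings.Split(...)" becomes
    // "for s := range strings.SplitSeq(...)" with no slice allocation.
    for checksum := range strings.SplitSeq(checksums, " ") {
        fmt.Println(checksum)
    }
}
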
119  bin/make-test-certs.sh  Executable file
@@ -0,0 +1,119 @@
#!/usr/bin/env bash
set -euo pipefail

# Create test TLS certificates for use with rclone.

OUT_DIR="${OUT_DIR:-./tls-test}"
CA_SUBJ="${CA_SUBJ:-/C=US/ST=Test/L=Test/O=Test Org/OU=Test Unit/CN=Test Root CA}"
SERVER_CN="${SERVER_CN:-localhost}"
CLIENT_CN="${CLIENT_CN:-Test Client}"
CLIENT_KEY_PASS="${CLIENT_KEY_PASS:-testpassword}"

CA_DAYS=${CA_DAYS:-3650}
SERVER_DAYS=${SERVER_DAYS:-825}
CLIENT_DAYS=${CLIENT_DAYS:-825}

mkdir -p "$OUT_DIR"
cd "$OUT_DIR"

# Create OpenSSL config

# CA extensions
cat > ca_openssl.cnf <<'EOF'
[ ca_ext ]
basicConstraints = critical, CA:true, pathlen:1
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
EOF

# Server extensions (SAN includes localhost + loopback IP)
cat > server_openssl.cnf <<EOF
[ server_ext ]
basicConstraints = critical, CA:false
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
subjectAltName = @alt_names

[ alt_names ]
DNS.1 = ${SERVER_CN}
IP.1 = 127.0.0.1
EOF

# Client extensions (for mTLS client auth)
cat > client_openssl.cnf <<'EOF'
[ client_ext ]
basicConstraints = critical, CA:false
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
EOF

echo "Create CA key, CSR, and self-signed CA cert"
if [ ! -f ca.key.pem ]; then
    openssl genrsa -out ca.key.pem 4096
    chmod 600 ca.key.pem
fi

openssl req -new -key ca.key.pem -subj "$CA_SUBJ" -out ca.csr.pem

openssl x509 -req -in ca.csr.pem -signkey ca.key.pem \
    -sha256 -days "$CA_DAYS" \
    -extfile ca_openssl.cnf -extensions ca_ext \
    -out ca.cert.pem

echo "Create server key (NO PASSWORD) and cert signed by CA"
openssl genrsa -out server.key.pem 2048
chmod 600 server.key.pem

openssl req -new -key server.key.pem -subj "/CN=${SERVER_CN}" -out server.csr.pem

openssl x509 -req -in server.csr.pem \
    -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
    -out server.cert.pem -days "$SERVER_DAYS" -sha256 \
    -extfile server_openssl.cnf -extensions server_ext

echo "Create client key (PASSWORD-PROTECTED), CSR, and cert"
openssl genrsa -aes256 -passout pass:"$CLIENT_KEY_PASS" -out client.key.pem 2048
chmod 600 client.key.pem

openssl req -new -key client.key.pem -passin pass:"$CLIENT_KEY_PASS" \
    -subj "/CN=${CLIENT_CN}" -out client.csr.pem

openssl x509 -req -in client.csr.pem \
    -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
    -out client.cert.pem -days "$CLIENT_DAYS" -sha256 \
    -extfile client_openssl.cnf -extensions client_ext

echo "Verify chain"
openssl verify -CAfile ca.cert.pem server.cert.pem client.cert.pem

echo "Done"

echo
echo "Summary"
echo "-------"
printf "%-22s %s\n" \
    "CA key:" "ca.key.pem" \
    "CA cert:" "ca.cert.pem" \
    "Server key:" "server.key.pem (no password)" \
    "Server CSR:" "server.csr.pem" \
    "Server cert:" "server.cert.pem (SAN: ${SERVER_CN}, 127.0.0.1)" \
    "Client key:" "client.key.pem (encrypted)" \
    "Client CSR:" "client.csr.pem" \
    "Client cert:" "client.cert.pem" \
    "Client key password:" "$CLIENT_KEY_PASS"

echo
echo "Test rclone server"
echo
echo "rclone serve http -vv --addr :8080 --cert ${OUT_DIR}/server.cert.pem --key ${OUT_DIR}/server.key.pem --client-ca ${OUT_DIR}/ca.cert.pem ."

echo
echo "Test rclone client"
echo
echo "rclone lsf :http: --http-url 'https://localhost:8080' --ca-cert ${OUT_DIR}/ca.cert.pem --client-cert ${OUT_DIR}/client.cert.pem --client-key ${OUT_DIR}/client.key.pem --client-pass \$(rclone obscure $CLIENT_KEY_PASS)"
echo
159  bin/make_bisync_docs.go  Normal file
@@ -0,0 +1,159 @@
//go:build ignore

package main

import (
    "bytes"
    "cmp"
    "context"
    "encoding/json"
    "flag"
    "fmt"
    "os"
    "path/filepath"
    "slices"
    "strings"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest/runs"
    "github.com/stretchr/testify/assert/yaml"
)

var path = flag.String("path", "./docs/content/", "root path")

const (
    configFile              = "fstest/test_all/config.yaml"
    startListIgnores        = "<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
    endListIgnores          = "<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
    startListFailures       = "<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
    endListFailures         = "<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
    integrationTestsJSONURL = "https://pub.rclone.org/integration-tests/current/index.json"
    integrationTestsHTMLURL = "https://pub.rclone.org/integration-tests/current/"
)

func main() {
    err := replaceBetween(*path, startListIgnores, endListIgnores, getIgnores)
    if err != nil {
        fs.Errorf(*path, "error replacing ignores: %v", err)
    }
    err = replaceBetween(*path, startListFailures, endListFailures, getFailures)
    if err != nil {
        fs.Errorf(*path, "error replacing failures: %v", err)
    }
}

// replaceBetween replaces the text between startSep and endSep with fn()
func replaceBetween(path, startSep, endSep string, fn func() (string, error)) error {
    b, err := os.ReadFile(filepath.Join(path, "bisync.md"))
    if err != nil {
        return err
    }
    doc := string(b)

    before, after, found := strings.Cut(doc, startSep)
    if !found {
        return fmt.Errorf("could not find: %v", startSep)
    }
    _, after, found = strings.Cut(after, endSep)
    if !found {
        return fmt.Errorf("could not find: %v", endSep)
    }

    replaceSection, err := fn()
    if err != nil {
        return err
    }

    newDoc := before + startSep + "\n" + strings.TrimSpace(replaceSection) + "\n" + endSep + after

    err = os.WriteFile(filepath.Join(path, "bisync.md"), []byte(newDoc), 0777)
    if err != nil {
        return err
    }
    return nil
}

// getIgnores updates the list of ignores from config.yaml
func getIgnores() (string, error) {
    config, err := parseConfig()
    if err != nil {
        return "", fmt.Errorf("failed to parse config: %v", err)
    }
    s := ""
    slices.SortFunc(config.Backends, func(a, b runs.Backend) int {
        return cmp.Compare(a.Remote, b.Remote)
    })
    for _, backend := range config.Backends {
        include := false

        if slices.Contains(backend.IgnoreTests, "cmd/bisync") {
            include = true
            s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
        }

        for _, ignore := range backend.Ignore {
            if strings.Contains(strings.ToLower(ignore), "bisync") {
                if !include { // don't have header row yet
                    s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
                }
                include = true
                s += fmt.Sprintf("  - `%s`\n", ignore)
                // TODO: might be neat to add a "reason" param displaying the reason the test is ignored
            }
        }
    }
    return s, nil
}

// getFailures updates the list of currently failing tests from the integration tests server
func getFailures() (string, error) {
    var buf bytes.Buffer
    err := operations.CopyURLToWriter(context.Background(), integrationTestsJSONURL, &buf)
    if err != nil {
        return "", err
    }

    r := runs.Report{}
    err = json.Unmarshal(buf.Bytes(), &r)
    if err != nil {
        return "", fmt.Errorf("failed to unmarshal json: %v", err)
    }

    s := ""
    for _, run := range r.Failed {
        for i, t := range run.FailedTests {
            if strings.Contains(strings.ToLower(t), "bisync") {

                if i == 0 { // don't have header row yet
                    s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(run.Remote, ":"), run.Backend)
                }

                url := integrationTestsHTMLURL + run.TrialName
                url = url[:len(url)-5] + "1.txt" // numbers higher than 1 could change from night to night
                s += fmt.Sprintf("  - [`%s`](%v)\n", t, url)

                if i == 4 && len(run.FailedTests) > 5 { // stop after 5
                    s += fmt.Sprintf("  - [%v more](%v)\n", len(run.FailedTests)-5, integrationTestsHTMLURL)
                    break
                }
            }
        }
    }
    s += fmt.Sprintf("- Updated: %v", r.DateTime)
    return s, nil
}

// parseConfig reads and parses the config.yaml file
func parseConfig() (*runs.Config, error) {
    d, err := os.ReadFile(configFile)
    if err != nil {
        return nil, fmt.Errorf("failed to read config file: %w", err)
    }
    config := &runs.Config{}
    err = yaml.Unmarshal(d, &config)
    if err != nil {
        return nil, fmt.Errorf("failed to parse config file: %w", err)
    }
    return config, nil
}
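
For context, bin/make_bisync_docs.go rewrites the region between these HTML comment markers inside docs/content/bisync.md. Based on the Sprintf calls above, the generated section would look roughly like this (remote and test names are illustrative):

<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->
- `TestSomeRemote` (`someremote`)
  - `TestBisyncLocalRemote`
<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->
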
17  bin/markdown-lint  Executable file
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
#
# Run markdown linting locally
set -e

# Workflow
build=.github/workflows/build.yml

# Globs read from $build
globs=$(awk '/- name: Check Markdown format/{f=1;next} f && /globs:/{f=2;next} f==2 && NF{if($1=="-"){exit} print $0}' $build)

if [ -z "$globs" ]; then
    echo "Error: No globs found in Check Markdown step in $build" >&2
    exit 1
fi

docker run -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs
@@ -33,7 +33,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
    }
    logMap = map[string]string{}
    logs = []string{}
    for _, line := range bytes.Split(out, []byte{'\n'}) {
    for line := range bytes.SplitSeq(out, []byte{'\n'}) {
        if len(line) == 0 {
            continue
        }

@@ -51,47 +51,52 @@ output. The output is typically used, free, quota and trash contents.

E.g. Typical output from ` + "`rclone about remote:`" + ` is:

    Total:   17 GiB
    Used:    7.444 GiB
    Free:    1.315 GiB
    Trashed: 100.000 MiB
    Other:   8.241 GiB
` + "```text" + `
Total:   17 GiB
Used:    7.444 GiB
Free:    1.315 GiB
Trashed: 100.000 MiB
Other:   8.241 GiB
` + "```" + `

Where the fields are:

* Total: Total size available.
* Used: Total size used.
* Free: Total space available to this user.
* Trashed: Total space used by trash.
* Other: Total amount in other storage (e.g. Gmail, Google Photos).
* Objects: Total number of objects in the storage.
- Total: Total size available.
- Used: Total size used.
- Free: Total space available to this user.
- Trashed: Total space used by trash.
- Other: Total amount in other storage (e.g. Gmail, Google Photos).
- Objects: Total number of objects in the storage.

All sizes are in number of bytes.

Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.

    Total:   18253611008
    Used:    7993453766
    Free:    1411001220
    Trashed: 104857602
    Other:   8849156022
` + "```text" + `
Total:   18253611008
Used:    7993453766
Free:    1411001220
Trashed: 104857602
Other:   8849156022
` + "```" + `

A ` + "`--json`" + ` flag generates conveniently machine-readable output, e.g.

    {
        "total": 18253611008,
        "used": 7993453766,
        "trashed": 104857602,
        "other": 8849156022,
        "free": 1411001220
    }
` + "```json" + `
{
    "total": 18253611008,
    "used": 7993453766,
    "trashed": 104857602,
    "other": 8849156022,
    "free": 1411001220
}
` + "```" + `

Not all backends print all fields. Information is not included if it is not
provided by a backend. Where the value is unlimited it is omitted.

Some backends do not support the ` + "`rclone about`" + ` command at all,
see complete list in [documentation](https://rclone.org/overview/#optional-features).
`,
see complete list in [documentation](https://rclone.org/overview/#optional-features).`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.41",
        // "groups": "",

@@ -30,14 +30,16 @@ rclone from a machine with a browser - use as instructed by
rclone config.

The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Use --template to generate HTML output via a custom Go template. If a blank
string is provided as an argument to this flag, the default template is used.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.27",
    },

@@ -37,26 +37,33 @@ see the backend docs for definitions.

You can discover what commands a backend implements by using

    rclone backend help remote:
    rclone backend help <backendname>
` + "```sh" + `
rclone backend help remote:
rclone backend help <backendname>
` + "```" + `

You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
for more info).

    rclone backend features remote:
` + "```sh" + `
rclone backend features remote:
` + "```" + `

Pass options to the backend command with -o. This should be key=value or key, e.g.:

    rclone backend stats remote:path stats -o format=json -o long
` + "```sh" + `
rclone backend stats remote:path stats -o format=json -o long
` + "```" + `

Pass arguments to the backend by placing them on the end of the line

    rclone backend cleanup remote:path file1 file2 file3
` + "```sh" + `
rclone backend cleanup remote:path file1 file2 file3
` + "```" + `

Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs.
`,
[backend/command](/rc/#backend-command) in the rc docs.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.52",
        "groups":            "Important",

@@ -177,6 +177,7 @@ var (
    // "src and dst identical but can't set mod time without deleting and re-uploading"
    argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)")
    ignoreLogs      = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings")
    argPCount       = flag.Int("pcount", 2, "number of parallel subtests to run for TestBisyncConcurrent") // go test ./cmd/bisync -race -pcount 10
)

// bisyncTest keeps all test data in a single place
@@ -284,6 +285,15 @@ func TestBisyncConcurrent(t *testing.T) {
    if !isLocal(*fstest.RemoteName) {
        t.Skip("TestBisyncConcurrent is skipped on non-local")
    }
    if *argTestCase != "" && *argTestCase != "basic" {
        t.Skip("TestBisyncConcurrent only tests 'basic'")
    }
    if *argPCount < 2 {
        t.Skip("TestBisyncConcurrent is pointless with -pcount < 2")
    }
    if *argGolden {
        t.Skip("skip TestBisyncConcurrent when goldenizing")
    }
    oldArgTestCase := argTestCase
    *argTestCase = "basic"
    *ignoreLogs = true // not useful to compare logs here because both runs will be logging at once
@@ -292,8 +302,9 @@ func TestBisyncConcurrent(t *testing.T) {
        *ignoreLogs = false
    })

    t.Run("test1", testParallel)
    t.Run("test2", testParallel)
    for i := 0; i < *argPCount; i++ {
        t.Run(fmt.Sprintf("test%v", i), testParallel)
    }
}

func testParallel(t *testing.T) {
@@ -465,6 +476,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str

    // Prepare initial content
    b.cleanupCase(ctx)
    ctx = accounting.WithStatsGroup(ctx, random.String(8))
    fstest.CheckListingWithPrecision(b.t, b.fs1, []fstest.Item{}, []string{}, b.fs1.Precision()) // verify starting from empty
    fstest.CheckListingWithPrecision(b.t, b.fs2, []fstest.Item{}, []string{}, b.fs2.Precision())
    initFs, err := cache.Get(ctx, b.initDir)
@@ -510,7 +522,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
    require.NoError(b.t, err)
    b.step = 0
    b.stopped = false
    for _, line := range strings.Split(string(scenBuf), "\n") {
    for line := range strings.SplitSeq(string(scenBuf), "\n") {
        comment := strings.Index(line, "#")
        if comment != -1 {
            line = line[:comment]
@@ -641,12 +653,11 @@ func (b *bisyncTest) cleanupCase(ctx context.Context) {
    _ = operations.Purge(ctx, b.fs1, "")
    _ = operations.Purge(ctx, b.fs2, "")
    _ = os.RemoveAll(b.workDir)
    accounting.Stats(ctx).ResetCounters()
}

func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
    var fsrc, fdst fs.Fs
    accounting.Stats(ctx).ResetErrors()
    ctx = accounting.WithStatsGroup(ctx, random.String(8))
    b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line))

    ci := fs.GetConfig(ctx)
@@ -925,7 +936,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
// splitLine splits scenario line into tokens and performs
// substitutions that involve whitespace or control chars.
func splitLine(line string) (args []string) {
    for _, s := range strings.Fields(line) {
    for s := range strings.FieldsSeq(line) {
        b := []byte(whitespaceReplacer.Replace(s))
        b = regexChar.ReplaceAllFunc(b, func(b []byte) []byte {
            c, _ := strconv.ParseUint(string(b[5:7]), 16, 8)
@@ -1007,6 +1018,7 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
    }
    // test if modtimes are writeable
    testSetModtime := func(f fs.Fs) {
        ctx := accounting.WithStatsGroup(ctx, random.String(8)) // keep stats separate
        in := bytes.NewBufferString("modtime_write_test")
        objinfo := object.NewStaticObjectInfo("modtime_write_test", initDate, int64(len("modtime_write_test")), true, nil, nil)
        obj, err := f.Put(ctx, in, objinfo)
@@ -1018,6 +1030,11 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
    if err == fs.ErrorCantSetModTime {
        b.t.Skip("skipping test as at least one remote does not support setting modtime")
    }
    if err == fs.ErrorCantSetModTimeWithoutDelete { // transfers stats expected to differ on this backend
        logReplacements = append(logReplacements, `^.*There was nothing to transfer.*$`, dropMe)
    } else {
        require.NoError(b.t, err)
    }
    if !f.Features().IsLocal {
        time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
    }
@@ -1496,7 +1513,7 @@ func (b *bisyncTest) compareResults() int {

    fs.Log(nil, divider)
    fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
    for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
    for line := range strings.SplitSeq(strings.TrimSpace(text), "\n") {
        fs.Logf(nil, "| %s", strings.TrimSpace(line))
    }
}
@@ -1619,6 +1636,14 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
            `^.*not equal on recheck.*$`, dropMe,
        )
    }
    if b.ignoreBlankHash || !b.fs1.Hashes().Contains(hash.MD5) || !b.fs2.Hashes().Contains(hash.MD5) {
        // if either side lacks support for md5, need to ignore the "nothing to transfer" log,
        // as sync may in fact need to transfer, where it would otherwise skip based on hash or just update modtime.
        // transfer stats will also differ in fs.ErrorCantSetModTimeWithoutDelete scenario, and where --download-hash is needed.
        logReplacements = append(logReplacements,
            `^.*There was nothing to transfer.*$`, dropMe,
        )
    }
    rep := logReplacements
    if b.testCase == "dry_run" {
        rep = append(rep, dryrunReplacements...)

@@ -141,7 +141,7 @@ func init() {
    flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
    flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
    flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
    flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
    flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync.", "")
    flags.BoolVarP(cmdFlags, &Opt.Recover, "recover", "", Opt.Recover, "Automatically recover from interruptions without requiring --resync.", "")
    flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "")
    flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
@@ -163,7 +163,6 @@ var commandDefinition = &cobra.Command{
    Annotations: map[string]string{
        "versionIntroduced": "v1.58",
        "groups":            "Filter,Copy,Important",
        "status":            "Beta",
    },
    RunE: func(command *cobra.Command, args []string) error {
        // NOTE: avoid putting too much handling here, as it won't apply to the rc.
@@ -191,7 +190,6 @@ var commandDefinition = &cobra.Command{
        }
    }

    fs.Logf(nil, "bisync is IN BETA. Don't use in production!")
    cmd.Run(false, true, command, func() error {
        err := Bisync(ctx, fs1, fs2, &opt)
        if err == ErrBisyncAborted {

@@ -177,7 +177,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
    }
    if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
        fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
        b.opt.Compare.HashType1 = hash.None
        b.opt.Compare.HashType2 = hash.None
    } else {
        b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
        if b.opt.Compare.HashType2 != hash.None {
@@ -219,8 +219,8 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
        return nil
    }
    var CompareFlag CompareOpt // for exclusions
    opts := strings.Split(b.opt.CompareFlag, ",")
    for _, opt := range opts {
    opts := strings.SplitSeq(b.opt.CompareFlag, ",")
    for opt := range opts {
        switch strings.ToLower(strings.TrimSpace(opt)) {
        case "size":
            b.opt.Compare.Size = true

@@ -35,8 +35,7 @@ var rcHelp = makeHelp(`This takes the following parameters
- removeEmptyDirs - remove empty directories at the final cleanup step
- filtersFile - read filtering patterns from a file
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
  Use at your own risk!
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
- workdir - server directory for history files (default: |~/.cache/rclone/bisync|)
- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
@@ -52,14 +51,15 @@ var longHelp = shortHelp + makeHelp(`
bidirectional cloud sync solution in rclone.
It retains the Path1 and Path2 filesystem listings from the prior run.
On each successive run it will:

- list files on Path1 and Path2, and check for changes on each side.
  Changes include |New|, |Newer|, |Older|, and |Deleted| files.
- Propagate changes on Path1 to Path2, and vice-versa.

Bisync is **in beta** and is considered an **advanced command**, so use with care.
Bisync is considered an **advanced command**, so use with care.
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
(especially the [Limitations](https://rclone.org/bisync/#limitations) section)
before using, or data loss can result. Questions can be asked in the
[Rclone Forum](https://forum.rclone.org/).

See [full bisync description](https://rclone.org/bisync/) for details.
`)
See [full bisync description](https://rclone.org/bisync/) for details.`)

@@ -434,7 +434,6 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
    }

    fulllisting, err = b.loadListingNum(listingNum)

    if err != nil {
        b.critical = true
        b.retryable = true
@@ -610,6 +609,11 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
        }
    }
    if srcNewName != "" { // if it was renamed and not deleted
        if new == nil { // should not happen. log error and debug info
            b.handleErr(b.renames, "internal error", fmt.Errorf("missing info for %q. Please report a bug at https://github.com/rclone/rclone/issues", srcNewName), true, true)
            fs.PrettyPrint(srcList, "srcList for debugging", fs.LogLevelNotice)
            continue
        }
        srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
        dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
    }

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -59,6 +61,7 @@ INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
|
||||
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -133,6 +136,7 @@ INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
|
||||
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -87,6 +89,7 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
|
||||
@@ -21,7 +21,9 @@ INFO : Using filters file {workdir/}exclude-other-filtersfile.txt
|
||||
INFO : Storing filters file hash to {workdir/}exclude-other-filtersfile.txt.{hashtype}
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -136,7 +138,9 @@ INFO : Using filters file {workdir/}include-other-filtersfile.txt
|
||||
INFO : Storing filters file hash to {workdir/}include-other-filtersfile.txt.{hashtype}
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -90,7 +92,9 @@ INFO : Copying Path2 files to Path1
|
||||
INFO : Checking access health
|
||||
INFO : Found 2 matching ".chk_file" files on both paths
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
@@ -102,7 +104,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : There was nothing to transfer
|
||||
INFO : Resync updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
@@ -23,7 +23,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -80,7 +82,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path1 Queue copy to Path2 - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
INFO : subdir: Making directory
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -124,6 +126,7 @@ INFO : Path2: 1 changes: 0 new, 0 modified,
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}RCLONE_TEST
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -148,7 +151,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -188,6 +193,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : subdir: Removing directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -27,7 +27,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}測試Русский ěáñ/" with Path2 "{path2/}測試Русский ěáñ/"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}測試Русский ěáñ/" vs Path2 "{path2/}測試Русский ěáñ/"
INFO : Bisync successful
@@ -84,7 +86,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -174,7 +178,9 @@ INFO : Using filters file {workdir/}測試_filtersfile.txt
INFO : Storing filters file hash to {workdir/}測試_filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -20,7 +20,9 @@ INFO : Using filters file {workdir/}filtersfile.flt
INFO : Storing filters file hash to {workdir/}filtersfile.flt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -81,7 +83,9 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Storing filters file hash to {workdir/}filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -146,7 +150,9 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Skipped storing filters file hash to {workdir/}filtersfile.txt.{hashtype} as --dry-run is set
INFO : Copying Path2 files to Path1
NOTICE: - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
NOTICE: - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Bisync successful

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -33,7 +35,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -84,6 +86,7 @@ INFO : - Path2 Queue delete - {
INFO : - Path2 Queue delete - {path2/}file4.txt
INFO : - Path2 Queue delete - {path2/}file5.txt
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -84,6 +86,7 @@ INFO : - Path1 Queue delete - {
INFO : - Path1 Queue delete - {path1/}file4.txt
INFO : - Path1 Queue delete - {path1/}file5.txt
INFO : - Path2 Do queued copies to - Path1
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -17,7 +17,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -115,7 +117,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -154,6 +158,7 @@ INFO : Applying changes
INFO : - Path2 Queue copy to Path1 - {path1/}file2.txt
INFO : - Path2 Queue copy to Path1 - {path1/}subdir/file21.txt
INFO : - Path2 Do queued copies to - Path1
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -171,6 +176,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -39,6 +39,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -22,6 +22,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
@@ -129,6 +130,7 @@ INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : file1.txt: Path1 is smaller. Path1: 33, Path2: 42, Difference: 9
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : file1.txt: Path1 is smaller. Path1: 33, Path2: 42, Difference: 9
INFO : Resync updating listings
@@ -158,6 +160,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -45,6 +47,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}subdir/file20.txt
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

@@ -43,15 +43,21 @@ var commandDefinition = &cobra.Command{

You can use it like this to output a single file

    rclone cat remote:path/to/file
|||sh
rclone cat remote:path/to/file
|||

Or like this to output any file in dir or its subdirectories.

    rclone cat remote:path/to/dir
|||sh
rclone cat remote:path/to/dir
|||

Or like this to output any .txt files in dir or its subdirectories.

    rclone --include "*.txt" cat remote:path/to/dir
|||sh
rclone --include "*.txt" cat remote:path/to/dir
|||

Use the |--head| flag to print characters only at the start, |--tail| for
the end and |--offset| and |--count| to print a section in the middle.
@@ -62,14 +68,17 @@ Use the |--separator| flag to print a separator value between files. Be sure to
shell-escape special characters. For example, to print a newline between
files, use:

* bash:
- bash:

    rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|||sh
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|||

* powershell:
- powershell:

    rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
`, "|", "`"),
|||powershell
rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
|||`, "|", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.33",
        "groups":            "Filter,Listing",

@@ -74,8 +74,7 @@ you what happened to it. These are reminiscent of diff files.
- |! path| means there was an error reading or hashing the source or dest.

The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information.
`, "|", "`")
option for more information.`, "|", "`")

// GetCheckOpt gets the options corresponding to the check flags
func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err error) {

@@ -17,8 +17,7 @@ var commandDefinition = &cobra.Command{
    Use:   "cleanup remote:path",
    Short: `Clean up the remote if possible.`,
    Long: `Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
versions. Not supported by all remotes.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.31",
        "groups":            "Important",

@@ -44,8 +44,7 @@ var configCommand = &cobra.Command{
    Short: `Enter an interactive configuration session.`,
    Long: `Enter an interactive configuration session where you can setup new
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
password to protect your configuration.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
    },
@@ -134,9 +133,7 @@ sensitive info with XXX.

This makes the config file suitable for posting online for support.

It should be double checked before posting as the redaction may not be perfect.

`,
It should be double checked before posting as the redaction may not be perfect.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.64",
    },
@@ -178,8 +175,8 @@ var configProvidersCommand = &cobra.Command{

var updateRemoteOpt config.UpdateRemoteOpt

var configPasswordHelp = strings.ReplaceAll(`
Note that if the config process would normally ask a question the
var configPasswordHelp = strings.ReplaceAll(
    `Note that if the config process would normally ask a question the
default is taken (unless |--non-interactive| is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
@@ -205,29 +202,29 @@ it.

This will look something like (some irrelevant detail removed):

|||
|||json
{
    "State": "*oauth-islocal,teamdrive,,",
    "Option": {
        "Name": "config_is_local",
        "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
        "Default": true,
        "Examples": [
            {
                "Value": "true",
                "Help": "Yes"
            },
            {
                "Value": "false",
                "Help": "No"
            }
        ],
        "Required": false,
        "IsPassword": false,
        "Type": "bool",
        "Exclusive": true,
    },
    "Error": "",
    "State": "*oauth-islocal,teamdrive,,",
    "Option": {
        "Name": "config_is_local",
        "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
        "Default": true,
        "Examples": [
            {
                "Value": "true",
                "Help": "Yes"
            },
            {
                "Value": "false",
                "Help": "No"
            }
        ],
        "Required": false,
        "IsPassword": false,
        "Type": "bool",
        "Exclusive": true,
    },
    "Error": "",
}
|||

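For anyone driving this question-and-answer protocol from code rather than by hand, the state blob above maps onto a small set of structs. The following is a minimal sketch based only on the fields visible in the example output; the type names are invented here and are not rclone's own:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// These structs mirror only the fields shown in the example above.
type optionExample struct {
    Value string `json:"Value"`
    Help  string `json:"Help"`
}

type option struct {
    Name       string          `json:"Name"`
    Help       string          `json:"Help"`
    Default    any             `json:"Default"`
    Examples   []optionExample `json:"Examples"`
    Required   bool            `json:"Required"`
    IsPassword bool            `json:"IsPassword"`
    Type       string          `json:"Type"`
    Exclusive  bool            `json:"Exclusive"`
}

type configState struct {
    State  string `json:"State"`
    Option option `json:"Option"`
    Error  string `json:"Error"`
}

func main() {
    blob := []byte(`{"State":"*oauth-islocal,teamdrive,,","Option":{"Name":"config_is_local","Default":true},"Error":""}`)
    var st configState
    if err := json.Unmarshal(blob, &st); err != nil {
        panic(err)
    }
    // Feed st.State back via --continue --state, with the answer as --result.
    fmt.Println(st.State, st.Option.Name)
}
```
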
@@ -250,7 +247,9 @@ The keys of |Option| are used as follows:
If |Error| is set then it should be shown to the user at the same
time as the question.

    rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|||sh
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|||

Note that when using |--continue| all passwords should be passed in
the clear (not obscured). Any default config values should be passed
@@ -264,8 +263,7 @@ not just the post config questions. Any parameters are used as
defaults for questions as usual.

Note that |bin/config.py| in the rclone source implements this protocol
as a readable demonstration.
`, "|", "`")
as a readable demonstration.`, "|", "`")
var configCreateCommand = &cobra.Command{
    Use:   "create name type [key value]*",
    Short: `Create a new remote with name, type and options.`,
@@ -275,13 +273,18 @@ should be passed in pairs of |key| |value| or as |key=value|.
For example, to make a swift remote of name myremote using auto config
you would do:

    rclone config create myremote swift env_auth true
    rclone config create myremote swift env_auth=true
|||sh
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
|||

So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:

    rclone config create mydrive drive config_is_local=false
|||sh
rclone config create mydrive drive config_is_local=false
|||

`, "|", "`") + configPasswordHelp,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
@@ -344,13 +347,18 @@ pairs of |key| |value| or as |key=value|.
For example, to update the env_auth field of a remote of name myremote
you would do:

    rclone config update myremote env_auth true
    rclone config update myremote env_auth=true
|||sh
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
|||

If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:

    rclone config update myremote env_auth=true config_refresh_token=false
|||sh
rclone config update myremote env_auth=true config_refresh_token=false
|||

`, "|", "`") + configPasswordHelp,
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
@@ -388,12 +396,13 @@ The |password| should be passed in in clear (unobscured).

For example, to set password of a remote of name myremote you would do:

    rclone config password myremote fieldname mypassword
    rclone config password myremote fieldname=mypassword
|||sh
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
|||

This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`, "|", "`"),
both support obscuring passwords directly.`, "|", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.39",
    },
@@ -441,8 +450,7 @@ var configReconnectCommand = &cobra.Command{

To disconnect the remote use "rclone config disconnect".

This normally means going through the interactive oauth flow again.
`,
This normally means going through the interactive oauth flow again.`,
    RunE: func(command *cobra.Command, args []string) error {
        ctx := context.Background()
        cmd.CheckArgs(1, 1, command, args)
@@ -461,8 +469,7 @@ var configDisconnectCommand = &cobra.Command{

This normally means revoking the oauth token.

To reconnect use "rclone config reconnect".
`,
To reconnect use "rclone config reconnect".`,
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        f := cmd.NewFsSrc(args)
@@ -490,8 +497,7 @@ var configUserInfoCommand = &cobra.Command{
    Use:   "userinfo remote:",
    Short: `Prints info about logged in user of remote.`,
    Long: `This prints the details of the person logged in to the cloud storage
system.
`,
system.`,
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        f := cmd.NewFsSrc(args)
@@ -534,8 +540,7 @@ var configEncryptionCommand = &cobra.Command{
    Use:   "encryption",
    Short: `set, remove and check the encryption for the config file`,
    Long: `This command sets, clears and checks the encryption for the config file using
the subcommands below.
`,
the subcommands below.`,
}

var configEncryptionSetCommand = &cobra.Command{
@@ -559,8 +564,7 @@ variable to distinguish which password you must supply.
Alternatively you can remove the password first (with |rclone config
encryption remove|), then set it again with this command which may be
easier if you don't mind the unencrypted config file being on the disk
briefly.
`, "|", "`"),
briefly.`, "|", "`"),
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        config.LoadedData()
@@ -580,8 +584,7 @@ If |--password-command| is in use, this will be called to supply the old config
password.

If the config was not encrypted then no error will be returned and
this command will do nothing.
`, "|", "`"),
this command will do nothing.`, "|", "`"),
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        config.LoadedData()
@@ -600,8 +603,7 @@ It will attempt to decrypt the config using the password you supply.
If decryption fails it will return a non-zero exit code if using
|--password-command|, otherwise it will prompt again for the password.

If the config file is not encrypted it will return a non zero exit code.
`, "|", "`"),
If the config file is not encrypted it will return a non zero exit code.`, "|", "`"),
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        config.LoadedData()

@@ -31,18 +31,27 @@ var commandDefinition = &cobra.Command{
    Use:   "convmv dest:path --name-transform XXX",
    Short: `Convert file and directory names in place.`,
    // Warning¡ "¡" will be replaced by backticks below
    Long: strings.ReplaceAll(`
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
    Long: strings.ReplaceAll(`convmv supports advanced path name transformations for converting and renaming
files and directories by applying prefixes, suffixes, and other alterations.

`+transform.Help()+`Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
`+transform.Help()+`The regex command generally accepts Perl-style regular expressions, the exact
syntax is defined in the [Go regular expression reference](https://golang.org/pkg/regexp/syntax/).
The replacement string may contain capturing group variables, referencing
capturing groups using the syntax ¡$name¡ or ¡${name}¡, where the name can
refer to a named capturing group or it can simply be the index as a number.
To insert a literal $, use $$.

Multiple transformations can be used in sequence, applied
in the order they are specified on the command line.

The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.

## Files vs Directories
### Files vs Directories

By default ¡--name-transform¡ will only apply to file names. The means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just directories.
To choose which which part of the file path is affected some tags can be added to the ¡--name-transform¡.
By default ¡--name-transform¡ will only apply to file names. The means only the
leaf file name will be transformed. However some of the transforms would be
better applied to the whole path or just directories. To choose which which
part of the file path is affected some tags can be added to the ¡--name-transform¡.

| Tag | Effect |
|------|------|
@@ -50,42 +59,58 @@ To choose which which part of the file path is affected some tags can be added t
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
| ¡all¡ | Transform the entire path for files and directories |

This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
This is used by adding the tag into the transform name like this:
¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.

For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡.
For some conversions using all is more likely to be useful, for example
¡--name-transform all,nfc¡.

Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.
Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name.
This will cause an error.

## Ordering and Conflicts
### Ordering and Conflicts

* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed
* Each transformation will be run one path segment at a time.
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify
transformations using ¡--dry-run¡ before execution.
- Transformations will be applied in the order specified by the user.
- If the ¡file¡ tag is in use (the default) then only the leaf name of files
  will be transformed.
- If the ¡dir¡ tag is in use then directories anywhere in the path will be
  transformed
- If the ¡all¡ tag is in use then directories and files anywhere in the path
  will be transformed
- Each transformation will be run one path segment at a time.
- If a transformation adds a ¡/¡ or ends up with an empty path segment then
  that will be an error.
- It is up to the user to put the transformations in a sensible order.
- Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or
  ¡nfc¡ followed by ¡nfd¡, are possible.
- Instead of enforcing mutual exclusivity, transformations are applied in
  sequence as specified by the user, allowing for intentional use cases
  (e.g., trimming one prefix before adding another).
- Users should be aware that certain combinations may lead to unexpected
  results and should verify transformations using ¡--dry-run¡ before execution.

## Race Conditions and Non-Deterministic Behavior
### Race Conditions and Non-Deterministic Behavior

Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
Some transformations, such as ¡replace=old:new¡, may introduce conflicts where
multiple source files map to the same destination name. This can lead to race
conditions when performing concurrent transfers. It is up to the user to
anticipate these.

- If two files from the source are transformed into the same name at the
  destination, the final state may be non-deterministic.
- Running rclone check after a sync using such transformations may erroneously
  report missing or differing files due to overwritten results.

To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.

`, "¡", "`"),
- Carefully review transformations that may introduce conflicts.
- Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind
  that it won't show the effect of non-deterministic transformations).
- Avoid transformations that cause multiple distinct source files to map to the
  same destination name.
- Consider disabling concurrency with ¡--transfers=1¡ if necessary.
- Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every
  time they are used. Avoid these when using ¡bisync¡.`, "¡", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.70",
        "groups":            "Filter,Listing,Important,Copy",

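The `$name`/`${name}` replacement syntax described above is the standard behaviour of Go's `regexp` package, which this command builds on. A minimal standalone sketch, with file names invented for illustration:

```go
package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Named capturing group "stem"; a literal $ would be written as $$.
    re := regexp.MustCompile(`^(?P<stem>.*)\.txt$`)
    fmt.Println(re.ReplaceAllString("notes.txt", "${stem}.md")) // notes.md
    fmt.Println(re.ReplaceAllString("report.txt", "$1.bak"))    // report.bak, index form
}
```
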
@@ -152,7 +152,7 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
    items := []fstest.Item{}
    for _, c := range alphabet {
        var out strings.Builder
        for i := rune(0); i < 7; i++ {
        for i := range rune(7) {
            out.WriteRune(c + i)
        }
        fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))

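A note on the loop change above: `for i := range rune(7)` uses Go 1.22's range-over-integer form, which counts from 0 to n-1 with the index taking the operand's type, so it is equivalent to the counted loop it replaces. A minimal sketch:

```go
package main

import "fmt"

func main() {
    var a, b string
    for i := rune(0); i < 7; i++ { // old form
        a += string('a' + i)
    }
    for i := range rune(7) { // Go 1.22+ form; i is a rune, 0 through 6
        b += string('a' + i)
    }
    fmt.Println(a == b, b) // true abcdefg
}
```
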
@@ -50,22 +50,30 @@ go there.

For example

    rclone copy source:sourcepath dest:destpath
|||sh
rclone copy source:sourcepath dest:destpath
|||

Let's say there are two files in sourcepath

    sourcepath/one.txt
    sourcepath/two.txt
|||text
sourcepath/one.txt
sourcepath/two.txt
|||

This copies them to

    destpath/one.txt
    destpath/two.txt
|||text
destpath/one.txt
destpath/two.txt
|||

Not to

    destpath/sourcepath/one.txt
    destpath/sourcepath/two.txt
|||text
destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
|||

If you are familiar with |rsync|, rclone always works as if you had
written a trailing |/| - meaning "copy the contents of this directory".
@@ -81,20 +89,22 @@ For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have changed
recently very efficiently like this:

    rclone copy --max-age 24h --no-traverse /path/to/src remote:

|||sh
rclone copy --max-age 24h --no-traverse /path/to/src remote:
|||

Rclone will sync the modification times of files and directories if
the backend supports it. If metadata syncing is required then use the
|--metadata| flag.

Note that the modification time and metadata for the root directory
will **not** be synced. See https://github.com/rclone/rclone/issues/7652
will **not** be synced. See [issue #7652](https://github.com/rclone/rclone/issues/7652)
for more info.

**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.

**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything.
**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without
copying anything.

`, "|", "`") + operationsflags.Help(),
    Annotations: map[string]string{

@@ -35,26 +35,32 @@ name. If the source is a directory then it acts exactly like the

So

    rclone copyto src dst
` + "```sh" + `
rclone copyto src dst
` + "```" + `

where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.
where src and dst are rclone paths, either ` + "`remote:path`" + ` or
` + "`/path/to/local`" + ` or ` + "`C:\\windows\\path\\if\\on\\windows`" + `.

This will:

    if src is file
        copy it to dst, overwriting an existing file if it exists
    if src is directory
        copy it to dst, overwriting existing files if they exist
        see copy command for full details
` + "```text" + `
if src is file
    copy it to dst, overwriting an existing file if it exists
if src is directory
    copy it to dst, overwriting existing files if they exist
    see copy command for full details
` + "```" + `

This doesn't transfer files that are identical on src and dst, testing
by size and modification time or MD5SUM. It doesn't delete files from
the destination.

*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
*If you are looking to copy just a byte range of a file, please see
` + "`rclone cat --offset X --count Y`" + `.*

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view
real-time transfer statistics.

` + operationsflags.Help(),
    Annotations: map[string]string{

@@ -3,6 +3,7 @@ package copyurl

import (
    "context"
    "encoding/csv"
    "errors"
    "fmt"
    "os"
@@ -12,7 +13,9 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/lib/errcount"
    "github.com/spf13/cobra"
    "golang.org/x/sync/errgroup"
)

var (
@@ -21,6 +24,7 @@ var (
    printFilename = false
    stdout        = false
    noClobber     = false
    urls          = false
)

func init() {
@@ -31,6 +35,7 @@ func init() {
    flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename", "")
    flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name", "")
    flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file", "")
    flags.BoolVarP(cmdFlags, &urls, "urls", "", urls, "Use a CSV file of links to process multiple URLs", "")
}

var commandDefinition = &cobra.Command{
@@ -48,12 +53,23 @@ set in HTTP headers, it will be used instead of the name from the URL.
With |--print-filename| in addition, the resulting file name will be
printed.

Setting |--no-clobber| will prevent overwriting file on the
destination if there is one with the same name.

Setting |--stdout| or making the output file name |-|
will cause the output to be written to standard output.

Setting |--urls| allows you to input a CSV file of URLs in format: URL,
FILENAME. If |--urls| is in use then replace the URL in the arguments with the
file containing the URLs, e.g.:
|||sh
rclone copyurl --urls myurls.csv remote:dir
|||
Missing filenames will be autogenerated equivalent to using |--auto-filename|.
Note that |--stdout| and |--print-filename| are incompatible with |--urls|.
This will do |--transfers| copies in parallel. Note that if |--auto-filename|
is desired for all URLs then a file with only URLs and no filename can be used.

### Troubleshooting

If you can't get |rclone copyurl| to work then here are some things you can try:
@@ -62,9 +78,7 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
- |--bind 0.0.0.0| rclone will use IPv6 if available - try disabling it
- |--bind ::0| to disable IPv4
- |--user-agent curl| - some sites have whitelists for curl's user-agent - try that
- Make sure the site works with |curl| directly

`, "|", "`"),
- Make sure the site works with |curl| directly`, "|", "`"),
    Annotations: map[string]string{
        "versionIntroduced": "v1.43",
        "groups":            "Important",
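To make the expected CSV shape concrete, here is a small sketch that parses a URL,FILENAME list the same way the command configures its reader (`FieldsPerRecord = -1`, so one-field rows carrying only a URL are accepted); the URLs and names are invented for illustration:

```go
package main

import (
    "encoding/csv"
    "fmt"
    "strings"
)

func main() {
    input := "https://example.com/a.txt,aaa.txt\n" +
        "https://example.com/b.txt\n" // no filename: auto-named, as with --auto-filename
    r := csv.NewReader(strings.NewReader(input))
    r.FieldsPerRecord = -1 // allow rows with one or two fields
    rows, err := r.ReadAll()
    if err != nil {
        panic(err)
    }
    for _, row := range rows {
        name := ""
        if len(row) > 1 {
            name = row[1]
        }
        fmt.Printf("url=%s filename=%q\n", row[0], name)
    }
}
```
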
@@ -72,32 +86,93 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
    RunE: func(command *cobra.Command, args []string) (err error) {
        cmd.CheckArgs(1, 2, command, args)

        var dstFileName string
        var fsdst fs.Fs
        if !stdout {
            if len(args) < 2 {
                return errors.New("need 2 arguments if not using --stdout")
            }
            if args[1] == "-" {
                stdout = true
            } else if autoFilename {
                fsdst = cmd.NewFsDir(args[1:])
            } else {
                fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
            }
        }
        cmd.Run(true, true, command, func() error {
            var dst fs.Object
            if stdout {
                err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
            } else {
                dst, err = operations.CopyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber)
                if printFilename && err == nil && dst != nil {
                    fmt.Println(dst.Remote())
                }
            if !urls {
                return run(args)
            }
            return err
            return runURLS(args)
        })
        return nil
    },
}

var copyURL = operations.CopyURL // for testing

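Routing `operations.CopyURL` through a package-level variable is a common Go test seam: production code calls through the variable, and the new tests below swap in a stub and restore the original afterwards. A minimal standalone sketch of the pattern, with invented names:

```go
package main

import "fmt"

// fetch is the seam: callers go through the variable, never the function.
var fetch = realFetch

func realFetch(url string) (string, error) {
    return "real response for " + url, nil
}

func get(url string) (string, error) { return fetch(url) }

func main() {
    // What a test would do (cf. t.Cleanup(resetGlobals) below):
    orig := fetch
    defer func() { fetch = orig }()
    fetch = func(url string) (string, error) { return "stubbed", nil }

    out, _ := get("https://example.com")
    fmt.Println(out) // stubbed
}
```
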
// runURLS processes a .csv file of urls and filenames
func runURLS(args []string) (err error) {
    if stdout {
        return errors.New("can't use --stdout with --urls")
    }
    if printFilename {
        return errors.New("can't use --print-filename with --urls")
    }
    dstFs := cmd.NewFsDir(args[1:])

    f, err := os.Open(args[0])
    if err != nil {
        return fmt.Errorf("failed to open .csv file: %w", err)
    }
    defer fs.CheckClose(f, &err)
    reader := csv.NewReader(f)
    reader.FieldsPerRecord = -1
    urlList, err := reader.ReadAll()
    if err != nil {
        return fmt.Errorf("failed reading .csv file: %w", err)
    }

    ec := errcount.New()
    g, gCtx := errgroup.WithContext(context.Background())
    ci := fs.GetConfig(gCtx)
    g.SetLimit(ci.Transfers)

    for _, urlEntry := range urlList {
        if len(urlEntry) == 0 {
            continue
        }
        g.Go(func() error {
            url := urlEntry[0]
            var filename string
            if len(urlEntry) > 1 {
                filename = urlEntry[1]
            }
            _, err := copyURL(gCtx, dstFs, filename, url, filename == "", headerFilename, noClobber)
            if err != nil {
                fs.Errorf(filename, "failed to copy URL %q: %v", url, err)
                ec.Add(err)
            }
            return nil
        })
    }
    ec.Add(g.Wait())
    return ec.Err("not all URLs copied successfully")
}

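The worker pattern in `runURLS`, an `errgroup` with `SetLimit` for bounded parallelism whose workers record per-item failures instead of aborting the whole group, is worth seeing in isolation. A minimal sketch, with error aggregation simplified to `errors.Join` in place of rclone's `errcount`:

```go
package main

import (
    "context"
    "errors"
    "fmt"
    "sync"

    "golang.org/x/sync/errgroup"
)

func process(_ context.Context, s string) error {
    if s == "c" {
        return fmt.Errorf("failed on %s", s)
    }
    return nil
}

func main() {
    items := []string{"a", "b", "c", "d"}
    g, ctx := errgroup.WithContext(context.Background())
    g.SetLimit(2) // at most 2 workers in flight, like --transfers

    var mu sync.Mutex
    var errs []error
    for _, it := range items { // per-iteration variable (Go 1.22+)
        g.Go(func() error {
            if err := process(ctx, it); err != nil {
                mu.Lock()
                errs = append(errs, err) // record and keep going
                mu.Unlock()
            }
            return nil // never abort the group on one bad item
        })
    }
    _ = g.Wait()
    if err := errors.Join(errs...); err != nil {
        fmt.Println("not all items processed:", err)
    }
}
```
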
// run runs the command for a single URL
func run(args []string) error {
    var err error
    var dstFileName string
    var fsdst fs.Fs
    if !stdout {
        if len(args) < 2 {
            return errors.New("need 2 arguments if not using --stdout")
        }
        if args[1] == "-" {
            stdout = true
        } else if autoFilename {
            fsdst = cmd.NewFsDir(args[1:])
        } else {
            fsdst, dstFileName = cmd.NewFsDstFile(args[1:])
        }
    }

    var dst fs.Object
    if stdout {
        err = operations.CopyURLToWriter(context.Background(), args[0], os.Stdout)
    } else {
        dst, err = copyURL(context.Background(), fsdst, dstFileName, args[0], autoFilename, headerFilename, noClobber)
        if printFilename && err == nil && dst != nil {
            fmt.Println(dst.Remote())
        }
    }
    return err
}

cmd/copyurl/copyurl_test.go (new file, 157 lines)
@@ -0,0 +1,157 @@
package copyurl

import (
    "context"
    "errors"
    "os"
    "path/filepath"
    "sync"
    "sync/atomic"
    "testing"

    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/operations"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func resetGlobals() {
    autoFilename = false
    headerFilename = false
    printFilename = false
    stdout = false
    noClobber = false
    urls = false
    copyURL = operations.CopyURL
}

func TestRun_RequiresTwoArgsWhenNotStdout(t *testing.T) {
    t.Cleanup(resetGlobals)
    resetGlobals()

    err := run([]string{"https://example.com/foo"})
    require.Error(t, err)
    assert.Contains(t, err.Error(), "need 2 arguments if not using --stdout")
}

func TestRun_CallsCopyURL_WithExplicitFilename_Success(t *testing.T) {
    t.Cleanup(resetGlobals)
    resetGlobals()

    tmp := t.TempDir()
    dstPath := filepath.Join(tmp, "out.txt")

    var called int32

    copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
        atomic.AddInt32(&called, 1)
        assert.Equal(t, "https://example.com/file", url)
        assert.Equal(t, "out.txt", dstFileName)
        assert.False(t, auto)
        assert.False(t, header)
        assert.False(t, noclobber)
        return nil, nil
    }

    err := run([]string{"https://example.com/file", dstPath})
    require.NoError(t, err)
    assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}

func TestRun_CallsCopyURL_WithAutoFilename_AndPropagatesError(t *testing.T) {
    t.Cleanup(resetGlobals)
    resetGlobals()

    tmp := t.TempDir()
    autoFilename = true

    want := errors.New("boom")
    var called int32

    copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
        atomic.AddInt32(&called, 1)
        assert.Equal(t, "", dstFileName) // auto filename -> empty
        assert.True(t, auto)
        return nil, want
    }

    err := run([]string{"https://example.com/auto/name", tmp})
    require.Error(t, err)
    assert.Equal(t, want, err)
    assert.Equal(t, int32(1), atomic.LoadInt32(&called))
}

func TestRunURLS_ErrorsWithStdoutAndWithPrintFilename(t *testing.T) {
    t.Cleanup(resetGlobals)
    resetGlobals()

    stdout = true
    err := runURLS([]string{"dummy.csv", "destDir"})
    require.Error(t, err)
    assert.Contains(t, err.Error(), "can't use --stdout with --urls")

    resetGlobals()
    printFilename = true
    err = runURLS([]string{"dummy.csv", "destDir"})
    require.Error(t, err)
    assert.Contains(t, err.Error(), "can't use --print-filename with --urls")
}

func TestRunURLS_ProcessesCSV_ParallelCalls_AndAggregatesError(t *testing.T) {
    t.Cleanup(resetGlobals)
    resetGlobals()

    tmp := t.TempDir()
    csvPath := filepath.Join(tmp, "urls.csv")
    csvContent := []byte(
        "https://example.com/a,aaa.txt\n" + // success
            "https://example.com/b\n" + // auto filename
            "https://example.com/c,ccc.txt\n") // error
    require.NoError(t, os.WriteFile(csvPath, csvContent, 0o600))

    // destination dir (local backend)
    dest := t.TempDir()

    // mock copyURL: succeed for /a and /b, fail for /c

    var calls int32
    var mu sync.Mutex
    var seen []string

    copyURL = func(_ctx context.Context, _dst fs.Fs, dstFileName, url string, auto, header, noclobber bool) (fs.Object, error) {
        atomic.AddInt32(&calls, 1)
        mu.Lock()
        seen = append(seen, url+"|"+dstFileName)
        mu.Unlock()

        switch {
        case url == "https://example.com/a":
            require.Equal(t, "aaa.txt", dstFileName)
            return nil, nil
        case url == "https://example.com/b":
            require.Equal(t, "", dstFileName) // auto-name path
            return nil, nil
        case url == "https://example.com/c":
            return nil, errors.New("network down")
        default:
            return nil, nil
        }
    }

    err := runURLS([]string{csvPath, dest})
    require.Error(t, err)
    assert.Contains(t, err.Error(), "not all URLs copied successfully")
    // 3 lines => 3 calls
    assert.Equal(t, int32(3), atomic.LoadInt32(&calls))

    // sanity: all expected URLs were seen
    assert.ElementsMatch(t,
        []string{
            "https://example.com/a|aaa.txt",
            "https://example.com/b|",
            "https://example.com/c|ccc.txt",
        },
        seen,
    )
}

@@ -37,14 +37,18 @@ checksum of the file it has just encrypted.

Use it like this

    rclone cryptcheck /path/to/files encryptedremote:path
` + "```sh" + `
rclone cryptcheck /path/to/files encryptedremote:path
` + "```" + `

You can use it like this also, but that will involve downloading all
the files in remote:path.
the files in ` + "`remote:path`" + `.

    rclone cryptcheck remote:path encryptedremote:path
` + "```sh" + `
rclone cryptcheck remote:path encryptedremote:path
` + "```" + `

After it has run it will log the status of the encryptedremote:.
After it has run it will log the status of the ` + "`encryptedremote:`" + `.
` + check.FlagsHelp,
    Annotations: map[string]string{
        "versionIntroduced": "v1.36",

@@ -33,13 +33,13 @@ If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file name

use it like this

    rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
` + "```sh" + `
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
rclone cryptdecode --reverse encryptedremote: filename1 filename2
` + "```" + `

    rclone cryptdecode --reverse encryptedremote: filename1 filename2

Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command.
See the documentation on the [crypt](/crypt/) overlay for more info.
`,
Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + `
command. See the documentation on the [crypt](/crypt/) overlay for more info.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.38",
    },

@@ -47,15 +47,15 @@ directories have been merged.

Next, if deduping by name, for every group of duplicate file names /
hashes, it will delete all but one identical file it finds without
confirmation. This means that for most duplicated files the ` +
"`dedupe`" + ` command will not be interactive.
confirmation. This means that for most duplicated files the
` + "`dedupe`" + ` command will not be interactive.

` + "`dedupe`" + ` considers files to be identical if they have the
same file path and the same hash. If the backend does not support hashes (e.g. crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This
can be useful on crypt backends which do not support hashes.
same file path and the same hash. If the backend does not support
hashes (e.g. crypt wrapping Google Drive) then they will never be found
to be identical. If you use the ` + "`--size-only`" + ` flag then files
will be considered identical if they have the same size (any hash will be
ignored). This can be useful on crypt backends which do not support hashes.

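The identity rule in that paragraph reduces to a small predicate. A minimal sketch under those rules, with illustrative names rather than rclone's internals:

```go
package main

import "fmt"

// identicalFiles reports whether two same-named files count as identical
// duplicates under the rules above: matching hashes by default, matching
// sizes when --size-only is in effect. An empty hash (a backend without
// hash support, e.g. crypt) never matches, so such files are never
// auto-deleted.
func identicalFiles(sizeOnly bool, sizeA, sizeB int64, hashA, hashB string) bool {
    if sizeOnly {
        return sizeA == sizeB
    }
    return hashA != "" && hashA == hashB
}

func main() {
    fmt.Println(identicalFiles(false, 10, 10, "", ""))         // false: no hashes available
    fmt.Println(identicalFiles(true, 10, 10, "", ""))          // true: size-only match
    fmt.Println(identicalFiles(false, 10, 12, "abc1", "abc1")) // true: hashes match
}
```
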
Next rclone will resolve the remaining duplicates. Exactly which
|
||||
action is taken depends on the dedupe mode. By default, rclone will
|
||||
@@ -68,71 +68,82 @@ Here is an example run.

Before - with duplicates

$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
` + "```sh" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
` + "```" + `

Now the ` + "`dedupe`" + ` session

$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
` + "```sh" + `
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
` + "```" + `

The result being

$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```sh" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```" + `

Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value
Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag
or by using an extra parameter with the same value

* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
* ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
* ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
* ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.
- ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
- ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
- ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
- ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
- ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
- ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
- ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
- ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
- ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.

For example, to rename all the identically named photos in your Google Photos directory, do
For example, to rename all the identically named photos in your Google Photos
directory, do

rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```sh" + `
rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```" + `

Or

rclone dedupe rename "drive:Google Photos"
`,
` + "```sh" + `
rclone dedupe rename "drive:Google Photos"
` + "```",
Annotations: map[string]string{
"versionIntroduced": "v1.27",
"groups": "Important",

@@ -32,26 +32,29 @@ obeys include/exclude filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use
the [purge](/commands/rclone_purge/) command.

If you supply the |--rmdirs| flag, it will remove all empty directories along with it.
You can also use the separate command [rmdir](/commands/rclone_rmdir/) or
[rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.
If you supply the |--rmdirs| flag, it will remove all empty directories along
with it. You can also use the separate command [rmdir](/commands/rclone_rmdir/)
or [rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.

For example, to delete all files bigger than 100 MiB, you may first want to
check what would be deleted (use either):

rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
|||sh
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
|||

Then proceed with the actual delete:

rclone --min-size 100M delete remote:path
|||sh
rclone --min-size 100M delete remote:path
|||

That reads "delete everything with a minimum size of 100 MiB", hence
delete all files bigger than 100 MiB.

**Important**: Since this can cause data loss, test first with the
|--dry-run| or the |--interactive|/|-i| flag.
`, "|", "`"),
|--dry-run| or the |--interactive|/|-i| flag.`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.27",
"groups": "Important,Filter,Listing",

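Editor's note: the trailing `, "|", "`"),` above is not corruption either. Unlike the other commands, delete's help is authored with | standing in for backticks (hence the |||sh fences) and converted once when the string is built. A minimal sketch of that substitution, assuming strings.ReplaceAll or an equivalent call (the exact invocation is truncated in the diff):

```go
package main

import (
	"fmt"
	"strings"
)

// The help is written with "|" wherever a backtick belongs, so the raw
// string literal never needs one; a single substitution restores them,
// turning |--rmdirs| into a backticked flag and |||sh into a ```sh fence.
var long = strings.ReplaceAll(`If you supply the |--rmdirs| flag, it will remove all empty directories along
with it.

|||sh
rclone --min-size 100M delete remote:path
|||
`, "|", "`")

func main() { fmt.Print(long) }
```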
@@ -19,9 +19,8 @@ var commandDefinition = &cobra.Command{
Use: "deletefile remote:path",
Short: `Remove a single file from remote.`,
Long: `Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
it will always be removed.
`,
remove a directory and it doesn't obey include/exclude filters - if the
specified file exists, it will always be removed.`,
Annotations: map[string]string{
"versionIntroduced": "v1.42",
"groups": "Important",

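For readers unfamiliar with the surrounding structure: each hunk in this diff edits the Long field of a cobra.Command value, and the Annotations map carries rclone's own metadata (versionIntroduced, groups). A compilable skeleton in the same shape as deletefile above — the Long text and Run body are stand-ins, not the real implementation:

```go
package main

import "github.com/spf13/cobra"

// Skeletal command mirroring the shape seen in the diff; only the
// fields shown there are filled in, everything else is elided.
var commandDefinition = &cobra.Command{
	Use:   "deletefile remote:path",
	Short: `Remove a single file from remote.`,
	Long: `Remove a single file from remote. Unlike ` + "`delete`" + ` it cannot be used to
remove a directory and it doesn't obey include/exclude filters.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.42",
		"groups":            "Important",
	},
	Run: func(command *cobra.Command, args []string) {},
}

func main() { _ = commandDefinition.Execute() }
```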
@@ -14,8 +14,7 @@ var completionDefinition = &cobra.Command{
Use: "completion [shell]",
Short: `Output completion script for a given shell.`,
Long: `Generates a shell completion script for rclone.
Run with ` + "`--help`" + ` to list the supported shells.
`,
Run with ` + "`--help`" + ` to list the supported shells.`,
Annotations: map[string]string{
"versionIntroduced": "v1.33",
},

@@ -18,17 +18,21 @@ var bashCommandDefinition = &cobra.Command{
Short: `Output bash completion script for rclone.`,
Long: `Generates a bash shell autocompletion script for rclone.

By default, when run without any arguments,
By default, when run without any arguments,

rclone completion bash
` + "```sh" + `
rclone completion bash
` + "```" + `

the generated script will be written to

/etc/bash_completion.d/rclone
` + "```sh" + `
/etc/bash_completion.d/rclone
` + "```" + `

and so rclone will probably need to be run as root, or with sudo.

If you supply a path to a file as the command line argument, then
If you supply a path to a file as the command line argument, then
the generated script will be written to that file, in which case
you should not need root privileges.

@@ -39,11 +43,12 @@ can logout and login again to use the autocompletion script.

Alternatively, you can source the script directly

. /path/to/my_bash_completion_scripts/rclone
` + "```sh" + `
. /path/to/my_bash_completion_scripts/rclone
` + "```" + `

and the autocompletion functionality will be added to your
current shell.
`,
current shell.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/bash_completion.d/rclone"

@@ -21,18 +21,21 @@ var fishCommandDefinition = &cobra.Command{
This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, e.g.

sudo rclone completion fish
` + "```sh" + `
sudo rclone completion fish
` + "```" + `

Logout and login again to use the autocompletion scripts, or source
them directly

. /etc/fish/completions/rclone.fish
` + "```sh" + `
. /etc/fish/completions/rclone.fish
` + "```" + `

If you supply a command line argument the script will be written
there.

If output_file is "-", then the output will be written to stdout.
`,
If output_file is "-", then the output will be written to stdout.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/fish/completions/rclone.fish"

@@ -20,13 +20,14 @@ var powershellCommandDefinition = &cobra.Command{

To load completions in your current shell session:

rclone completion powershell | Out-String | Invoke-Expression
` + "```sh" + `
rclone completion powershell | Out-String | Invoke-Expression
` + "```" + `

To load completions for every new session, add the output of the above command
to your powershell profile.

If output_file is "-" or missing, then the output will be written to stdout.
`,
If output_file is "-" or missing, then the output will be written to stdout.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if len(args) == 0 || (len(args) > 0 && args[0] == "-") {
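One small observation on the Run body above: because || short-circuits, the inner len(args) > 0 check is redundant. A behaviorally equivalent sketch of the guard:

```go
// writeToStdout reports whether the completion script should go to
// stdout: no output path was given, or the path is "-". Equivalent to
// the condition in the hunk above, minus the redundant length check.
func writeToStdout(args []string) bool {
	return len(args) == 0 || args[0] == "-"
}
```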