Mirror of https://github.com/rclone/rclone.git (synced 2026-02-04 02:33:44 +00:00)

Compare commits: build...fix-assume (170 commits)
| SHA1 |
|---|
| d376838f77 |
| e7f11af1ca |
| 0b5c4cc442 |
| 178ddafdc7 |
| ad316ec6e3 |
| 61b022dfc3 |
| 1903b4c1a2 |
| f7cbcf556f |
| 3581e628c0 |
| 62c41bf449 |
| c5864e113b |
| 39259a5bd1 |
| 2e376eb3b9 |
| de8e9d4693 |
| 710cf49bc6 |
| 8dacac60ea |
| 3a80d4d4b4 |
| a531f987a8 |
| e906b8d0c4 |
| a5932ef91a |
| 3afa563eaf |
| 9d9654b31f |
| cfe257f13d |
| 0375efbd35 |
| cad1954213 |
| 604e37caa5 |
| b249d384b9 |
| 04e91838db |
| 94829aaec5 |
| f574e3395c |
| 2bc155a96a |
| adc8ea3427 |
| 068eea025c |
| 4510aa679a |
| 79281354c7 |
| f57a178719 |
| 44f2e2ed39 |
| 13e1752d94 |
| bb82c0e43b |
| 1af7151e73 |
| fd63478ed6 |
| 5133b05c74 |
| 6ba96ede4b |
| 2896973964 |
| be123d85ff |
| b1b9562ab7 |
| 5146b66569 |
| 8898372d5a |
| 091fe9e453 |
| 8fdb68e41a |
| c124aa2ed3 |
| 54e8bb89f7 |
| 50c1b594ab |
| 72437a9ca2 |
| 8ed55c61e1 |
| bd598c1ceb |
| 7e30665102 |
| d44957a09c |
| 37524e2dea |
| 2f6a6c8233 |
| 4ad40b6554 |
| 4f33d64f25 |
| 519623d9f1 |
| 913278327b |
| a9b05e4c7a |
| 5d6d79e7d4 |
| 11de074cbf |
| e9ab177a32 |
| f3f4fba98d |
| 03fccdd67b |
| 231083647e |
| 0e203a7546 |
| a7dd787569 |
| 689555033e |
| 4fc4898287 |
| b003169088 |
| babd112665 |
| 71b9b4ad7a |
| 4368863fcb |
| 04d49bf0ea |
| d7aa37d263 |
| 379dffa61c |
| 5fd4ece31f |
| fc3f95190b |
| d6f5652b65 |
| b5cbb7520d |
| a170dfa55b |
| 1449c5b5ba |
| 35fe609722 |
| cce399515f |
| 8c5af2f51c |
| c639d3656e |
| d9fbbba5c3 |
| fd87560388 |
| d87720a787 |
| d541caa52b |
| fd1665ae93 |
| 457d80e8a9 |
| c5a3e86df8 |
| 4026e8db20 |
| c9ce686231 |
| b085598cbc |
| bb47dccdeb |
| 7a279d2789 |
| 9bd5df658a |
| d512e4d566 |
| 3dd68c824a |
| fbe73c993b |
| d915f75edf |
| 26b629f42f |
| ceaac2194c |
| 1f14b6aa35 |
| dd75af6a18 |
| 99e8a63df2 |
| 0019e18ac3 |
| 218c3bf6e9 |
| 8f9702583d |
| e6578fb5a1 |
| fa1d7da272 |
| 813708c24d |
| fee4716343 |
| 6e9a675b3f |
| 7f5a444350 |
| d2916ac5c7 |
| 3369a15285 |
| 58aee30de7 |
| ef919241a6 |
| d5386bb9a7 |
| bf46ea5611 |
| b8a379c9c9 |
| 8c37a9c2ef |
| 963a72ce01 |
| a4962e21d1 |
| 9e200531b1 |
| 04683f2032 |
| b41f7994da |
| 13a5ffe391 |
| 85deea82e4 |
| 89a8ea7a91 |
| c8912eb6a0 |
| 01674949a1 |
| 98e1d3ee73 |
| 50d7a80331 |
| bc3e8e1abd |
| 30e80d0716 |
| f288920696 |
| fa2bbd705c |
| 43a794860f |
| adfe6b3bad |
| 091ccb649c |
| 2e02d49578 |
| 514535ad46 |
| b010591c96 |
| 1aaee9edce |
| 3f0e9f5fca |
| cfd0d28742 |
| e7a2b322ec |
| d3a0805a2b |
| d4edf8ac18 |
| 87d14b000a |
| 12bded980b |
| 6e0e76af9d |
| 6f9b2f7b9b |
| f61d79396d |
| 9b22e38450 |
| 9e4fe18830 |
| ae5cc1ab37 |
| d4be38ec02 |
| 115cff3007 |
| 70b862f026 |
.github/workflows/build.yml (vendored): 19 changed lines

@@ -100,7 +100,7 @@ jobs:
fetch-depth: 0

- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version: ${{ matrix.go }}
check-latest: true

@@ -222,9 +222,9 @@ jobs:

- name: Install Go
id: setup-go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version: '1.24'
go-version: '>=1.24.0-rc.1'
check-latest: true
cache: false

@@ -239,13 +239,13 @@ jobs:
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v8
with:
version: latest
skip-cache: true

- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v8
env:
GOOS: "windows"
with:

@@ -253,7 +253,7 @@ jobs:
skip-cache: true

- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v8
env:
GOOS: "darwin"
with:

@@ -261,7 +261,7 @@ jobs:
skip-cache: true

- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v8
env:
GOOS: "freebsd"
with:

@@ -269,7 +269,7 @@ jobs:
skip-cache: true

- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v6
uses: golangci/golangci-lint-action@v8
env:
GOOS: "openbsd"
with:

@@ -290,6 +290,7 @@ jobs:
MAINTAINERS.md
README.md
RELEASE.md
CODE_OF_CONDUCT.md
docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md

- name: Scan edits of autogenerated files

@@ -310,7 +311,7 @@ jobs:

# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version: '>=1.25.0-rc.1'

@@ -92,7 +92,7 @@ jobs:

# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
uses: actions/github-script@v8
with:
result-encoding: string
script: |
.golangci.yml: 256 changed lines

@@ -1,144 +1,146 @@
# golangci-lint configuration options
version: "2"

linters:
# Configure the linter set. To avoid unexpected results the implicit default
# set is ignored and all the ones to use are explicitly enabled.
default: none
enable:
# Default
- errcheck
- goimports
- revive
- ineffassign
- govet
- unconvert
- ineffassign
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
# Additional
- gocritic
#- prealloc
#- maligned
disable-all: true
- misspell
#- prealloc # TODO
- revive
- unconvert
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
staticcheck:
# With staticcheck there is only one setting, so to extend the implicit
# default value it must be explicitly included.
checks:
# Default
- all
- -ST1000
- -ST1003
- -ST1016
- -ST1020
- -ST1021
- -ST1022
# Disable quickfix checks
- -QF*
gocritic:
# With gocritic there are different settings, but since enabled-checks
# and disabled-checks cannot both be set, for full customization the
# alternative is to disable all defaults and explicitly enable the ones
# to use.
disable-all: true
enabled-checks:
#- appendAssign # Skip default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Skip default
- caseOrder
- codegenComment
#- commentFormatting # Skip default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Skip default
- flagDeref
- flagName
#- ifElseChain # Skip default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Enable additional check that are not enabled by default
#- singleCaseSwitch # Skip default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: ${base-path}/bin/rules.go
revive:
# With revive there is in reality only one setting, and when at least one
# rule are specified then only these rules will be considered, defaults
# and all others are then implicitly disabled, so must explicitly enable
# all rules to be used.
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
#- name: empty-block # Skip default
# disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
#- name: increment-decrement # Skip default
# disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
#- name: redefines-builtin-id # Skip default
# disabled: true
#- name: superfluous-else # Skip default
# disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
#- name: unreachable-code # Skip default
# disabled: true
#- name: unused-parameter # Skip default
# disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false

formatters:
enable:
- goimports

issues:
# Enable some lints excluded by default
exclude-use-default: false

# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0

# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

exclude-rules:

- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015

run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
# Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
timeout: 10m

linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"
CODE_OF_CONDUCT.md (new file): 80 lines

@@ -0,0 +1,80 @@
# Rclone Code of Conduct

Like the technical community as a whole, the Rclone team and community
is made up of a mixture of professionals and volunteers from all over
the world, working on every aspect of the mission - including
mentorship, teaching, and connecting people.

Diversity is one of our huge strengths, but it can also lead to
communication issues and unhappiness. To that end, we have a few
ground rules that we ask people to adhere to. This code applies
equally to founders, mentors and those seeking help and guidance.

This isn't an exhaustive list of things that you can't do. Rather,
take it in the spirit in which it's intended - a guide to make it
easier to enrich all of us and the technical communities in which we
participate.

This code of conduct applies to all spaces managed by the Rclone
project or Rclone Services Ltd. This includes the issue tracker, the
forum, the GitHub site, the wiki, any other online services or
in-person events. In addition, violations of this code outside these
spaces may affect a person's ability to participate within them.

- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and
  supports people of all backgrounds and identities. This includes,
  but is not limited to members of any race, ethnicity, culture,
  national origin, colour, immigration status, social and economic
  class, educational level, sex, sexual orientation, gender identity
  and expression, age, size, family status, political belief,
  religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you
  in turn will depend on the work of others. Any decision you take
  will affect users and colleagues, and you should take those
  consequences into account when making decisions. Remember that we're
  a world-wide community, so you might not be communicating in someone
  else's primary language.
- **Be respectful.** Not all of us will agree all the time, but
  disagreement is no excuse for poor behavior and poor manners. We
  might all experience some frustration now and then, but we cannot
  allow that frustration to turn into a personal attack. It's
  important to remember that a community where people feel
  uncomfortable or threatened is not a productive one. Members of the
  Rclone community should be respectful when dealing with other
  members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of
  professionals, and we conduct ourselves professionally. Be kind to
  others. Do not insult or put down other participants. Harassment and
  other exclusionary behavior aren't acceptable. This includes, but is
  not limited to:
  - Violent threats or language directed against another person.
  - Discriminatory jokes and language.
  - Posting sexually explicit or violent material.
  - Posting (or threatening to post) other people's personally
    identifying information ("doxing").
  - Personal insults, especially those using racist or sexist terms.
  - Unwelcome sexual attention.
  - Advocating for, or encouraging, any of the above behavior.
  - Repeated harassment of others. In general, if someone asks you to
    stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both
  social and technical, happen all the time and Rclone is no
  exception. It is important that we resolve disagreements and
  differing views constructively. Remember that we're different. The
  strength of Rclone comes from its varied community, people from a
  wide range of backgrounds. Different people have different
  perspectives on issues. Being unable to understand why someone holds
  a viewpoint doesn't mean that they're wrong. Don't forget that it is
  human to err and blaming each other doesn't get us anywhere.
  Instead, focus on helping to resolve issues and learning from
  mistakes.

If you believe someone is violating the code of conduct, we ask that
you report it by emailing [info@rclone.com](mailto:info@rclone.com).

Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).

## Questions?

If you have questions, please feel free to [contact us](mailto:info@rclone.com).
@@ -628,7 +628,7 @@ You'll need to modify the following files
- `backend/s3/s3.go`
  - Add the provider to `providerOption` at the top of the file
  - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
  - Exclude your provider from generic config questions (eg `region` and `endpoint).
  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
  - Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
  - Add the provider at the top of the page.
MANUAL.html (generated): 46422 changed lines (diff suppressed because it is too large)
MANUAL.txt (generated): 5935 changed lines (diff suppressed because it is too large)
Makefile: 15 changed lines

@@ -100,6 +100,7 @@ compiletest:
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@golangci-lint run $(LINTTAGS) ./...
@bin/markdown-lint
@echo "-- END CODE QUALITY REPORT ---------------------------------"

# Get the build dependencies

@@ -113,21 +114,21 @@ release_dep_linux:
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null

# Update direct dependencies only
updatedirect:
GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
GO111MODULE=on go mod tidy
go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
go mod tidy

# Update direct and indirect dependencies and test dependencies
update:
GO111MODULE=on go get -d -u -t ./...
GO111MODULE=on go mod tidy
go get -u -t ./...
go mod tidy

# Tidy the module dependencies
tidy:
GO111MODULE=on go mod tidy
go mod tidy

doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

@@ -144,9 +145,11 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
go generate ./lib/transform
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
go run bin/make_bisync_docs.go ./docs/content/

backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
@@ -34,6 +34,7 @@ directories to and from different cloud storage providers.
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)

@@ -50,6 +51,7 @@ directories to and from different cloud storage providers.
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)

@@ -59,6 +61,7 @@ directories to and from different cloud storage providers.
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)

@@ -94,6 +97,7 @@ directories to and from different cloud storage providers.
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)

@@ -103,8 +107,10 @@ directories to and from different cloud storage providers.
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js

// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob

@@ -1338,9 +1338,9 @@ func (f *Fs) containerOK(container string) bool {
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
if !f.containerOK(containerName) {
return nil, fs.ErrorDirNotFound
return fs.ErrorDirNotFound
}
err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)

@@ -1348,16 +1348,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
})
if err != nil {
return nil, err
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(containerName)
return entries, nil
return nil
}

// listContainers returns all the containers to out

@@ -1393,14 +1393,47 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
if err != nil {
return err
}

}
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
return list.Flush()
}

// ListR lists the objects and directories of the Fs starting

@@ -2119,7 +2152,6 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
}
metadata = make(map[string]*string, len(o.meta))
for k, v := range o.meta {
v := v
metadata[k] = &v
}
return metadata

@@ -2765,8 +2797,6 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
blockList blockblob.GetBlockListResponse
properties *blob.GetPropertiesResponse
options *blockblob.CommitBlockListOptions
// Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
)

properties, err = o.readMetaDataAlways(ctx)

@@ -2778,7 +2808,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

if objectExists {
// Get the committed block list
err = pacer.Call(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
return o.fs.shouldRetry(ctx, err)
})

@@ -2820,7 +2850,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

// Commit only the committed blocks
fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
err = pacer.Call(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
return o.fs.shouldRetry(ctx, err)
})

@@ -3156,6 +3186,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
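The azureblob change above (and the matching b2 and googlecloudstorage changes later in this comparison) all follow the same shape: `List` becomes a thin wrapper over a streaming `ListP`. A minimal sketch of that shape, using the `fs.ListRCallback`, `list.WithListP` and `list.NewHelper` helpers visible in the diff; `myFs` and `enumerate` are hypothetical stand-ins for a backend and its paginated listing loop:

```go
// Sketch of the List-via-ListP pattern used in these commits.
// fs.ListRCallback, list.WithListP and list.NewHelper are the rclone
// helpers shown in the diff; myFs and enumerate are hypothetical.
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/list"
)

type myFs struct{ /* backend state */ }

// enumerate stands in for a backend's paginated listing loop; it calls
// emit once per directory entry as pages arrive from the remote API.
func (f *myFs) enumerate(ctx context.Context, dir string, emit func(fs.DirEntry) error) error {
	// page through the remote API here, calling emit for every entry
	return nil
}

// List gathers everything into memory by delegating to ListP.
func (f *myFs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries to callback in tranches as they arrive.
func (f *myFs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback) // batches entries for the callback
	if err := f.enumerate(ctx, dir, helper.Add); err != nil {
		return err
	}
	return helper.Flush() // send any remaining batched entries
}
```

The payoff is that a recursive or very large listing no longer has to accumulate every entry before the caller sees the first one.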
@@ -1,4 +1,4 @@
//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js

package azureblob
@@ -1,6 +1,6 @@
// Test AzureBlob filesystem interface

//go:build !plan9 && !solaris && !js && !wasm
//go:build !plan9 && !solaris && !js

package azureblob
@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || solaris || js || wasm
//go:build plan9 || solaris || js

// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles

@@ -1313,10 +1313,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
srcURL := srcObj.fileClient().URL()
fc := f.fileClient(remote)
_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
if err != nil {
return nil, fmt.Errorf("Copy failed: %w", err)
}

// Poll for completion if necessary
//
// The for loop is never executed for same storage account copies.
copyStatus := startCopy.CopyStatus
var properties file.GetPropertiesResponse
pollTime := 100 * time.Millisecond

for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
time.Sleep(pollTime)

properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
if err != nil {
return nil, err
}
copyStatus = properties.CopyStatus
pollTime = min(2*pollTime, time.Second)
}

dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
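The polling loop added to `Copy` above doubles its sleep each round and caps it at one second. A sketch of that capped-exponential wait as a standalone helper, assuming a hypothetical `done` probe (the real code checks `CopyStatus` via `GetProperties`):

```go
// Minimal sketch of capped-exponential polling: start fast so short
// copies finish quickly, double the interval each round, and cap it so
// a long-running copy is checked about once per second. waitDone and
// its done probe are hypothetical stand-ins, not rclone API.
package example

import (
	"context"
	"time"
)

func waitDone(ctx context.Context, done func(context.Context) (bool, error)) error {
	pollTime := 100 * time.Millisecond
	for {
		ok, err := done(ctx)
		if err != nil || ok {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(pollTime):
		}
		pollTime = min(2*pollTime, time.Second) // built-in min, Go 1.21+
	}
}
```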
@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

package azurefiles
@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

package azurefiles
@@ -1,7 +1,7 @@
// Build for azurefiles for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || js || wasm
//go:build plan9 || js

// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles
@@ -847,7 +847,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
last := ""
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)

@@ -855,16 +855,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
})
if err != nil {
return nil, err
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, nil
return nil
}

// listBuckets returns all the buckets to out

@@ -890,14 +890,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return list.Flush()
}

// ListR lists the objects and directories of the Fs starting

@@ -2192,13 +2224,17 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
return info, nil, err
}

up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
if err != nil {
return info, nil, err
}

info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
ChunkSize: up.chunkSize,
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
return info, up, nil
}

// Remove an object

@@ -2428,6 +2464,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}
@@ -125,10 +125,21 @@ type FolderItems struct {
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
// There is some confusion about how this is actually
// returned. The []struct has worked for many years, but in
// https://github.com/rclone/rclone/issues/8776 box was
// returning it returned not as a list. We don't actually use
// this so comment it out.
//
// Order struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
//
// Order []struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
}

// Parent defined the ID of the parent directory
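If the `order` field were ever needed despite the shape confusion described in that comment, one tolerant approach (not what this commit does, which is simply to stop decoding the field) is to defer parsing with `json.RawMessage` and accept either shape. The `folderItems` and `sortOrder` names below are illustrative only:

```go
// Hedged sketch: tolerate a JSON field that arrives either as an
// object or as an array of objects, as described for "order" above.
package example

import "encoding/json"

type sortOrder struct {
	By        string `json:"by"`
	Direction string `json:"direction"`
}

type folderItems struct {
	Order json.RawMessage `json:"order"` // decoded lazily in orders()
}

// orders returns the order field whether the server sent a list or a
// single object.
func (f *folderItems) orders() ([]sortOrder, error) {
	if len(f.Order) == 0 {
		return nil, nil
	}
	var many []sortOrder
	if err := json.Unmarshal(f.Order, &many); err == nil {
		return many, nil
	}
	var one sortOrder
	if err := json.Unmarshal(f.Order, &one); err != nil {
		return nil, err
	}
	return []sortOrder{one}, nil
}
```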
backend/cache/cache.go (vendored): 4 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

// Package cache implements a virtual provider to cache existing remotes.
package cache

@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
for _, part := range strings.Split(ranges, ",") {
for part := range strings.SplitSeq(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
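This commit (and several later ones in the doi and drive backends) replaces `strings.Split` with `strings.SplitSeq`, the iterator variant added in Go 1.24 that yields substrings one at a time instead of building a `[]string` first, which is why the loops drop the index variable. A self-contained comparison:

```go
// strings.SplitSeq (Go 1.24) returns an iter.Seq[string] that can be
// ranged over directly, avoiding the intermediate slice allocation
// that strings.Split performs.
package main

import (
	"fmt"
	"strings"
)

func main() {
	ranges := "0:100,200:300,400:"

	// Old form: allocates a slice of all parts up front.
	for _, part := range strings.Split(ranges, ",") {
		fmt.Println("split:", part)
	}

	// New form: yields each part lazily from an iterator.
	for part := range strings.SplitSeq(ranges, ",") {
		fmt.Println("splitseq:", part)
	}
}
```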
backend/cache/cache_internal_test.go (vendored): 2 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm && !race
//go:build !plan9 && !js && !race

package cache_test
backend/cache/cache_test.go (vendored): 2 changed lines

@@ -1,6 +1,6 @@
// Test Cache filesystem interface

//go:build !plan9 && !js && !wasm && !race
//go:build !plan9 && !js && !race

package cache_test
backend/cache/cache_unsupported.go (vendored): 2 changed lines

@@ -1,7 +1,7 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || js || wasm
//go:build plan9 || js

// Package cache implements a virtual provider to cache existing remotes.
package cache
backend/cache/cache_upload_test.go (vendored): 2 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm && !race
//go:build !plan9 && !js && !race

package cache_test
backend/cache/directory.go (vendored): 2 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

package cache
backend/cache/handle.go (vendored): 2 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

package cache
backend/cache/object.go (vendored): 2 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

package cache
backend/cache/plex.go (vendored): 2 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

package cache
backend/cache/storage_memory.go (vendored): 2 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

package cache
backend/cache/storage_persistent.go (vendored): 2 changed lines

@@ -1,4 +1,4 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js

package cache
backend/cache/utils_test.go (vendored): 3 changed lines

@@ -1,4 +1,5 @@
//go:build !plan9 && !js && !wasm
//go:build !plan9 && !js
// +build !plan9,!js

package cache
@@ -187,7 +187,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {

@@ -241,18 +240,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
canMove, slowHash := true, false
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
slowHash = slowHash || u.f.Features().SlowHash
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}

// If any of upstreams are SlowHash, propagate it
features.SlowHash = slowHash

// Enable ListR when upstreams either support ListR or is local
// But not when all upstreams are local
if features.ListR == nil {

@@ -366,7 +369,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})

@@ -633,7 +635,6 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var uChans []chan time.Duration

for _, u := range f.upstreams {
u := u
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
@@ -598,7 +598,7 @@ It doesn't return anything.
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)

@@ -625,7 +625,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}

// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err
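The `interface{}` to `any` rewrites here (and in the api-types change further down) are purely cosmetic: `any` has been a predeclared alias for `interface{}` since Go 1.18, so the function signatures are unchanged types. A small demonstration:

```go
// `any` is an alias for `interface{}` (Go 1.18+), so the two parameter
// types below are identical and the assignment compiles.
package main

import "fmt"

func oldStyle(v interface{}) { fmt.Println("old:", v) }
func newStyle(v any)         { fmt.Println("new:", v) }

func main() {
	var f func(any) = oldStyle // legal: func(interface{}) == func(any)
	f("hello")
	newStyle(42)
}
```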
@@ -18,7 +18,7 @@ type headerLink struct {
}

func parseLinkHeader(header string) (links []headerLink) {
for _, link := range strings.Split(header, ",") {
for link := range strings.SplitSeq(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {

@@ -30,7 +30,7 @@ func parseLinkHeader(header string) (links []headerLink) {

func parseLink(link string) (parsedLink *headerLink) {
var parts []string
for _, part := range strings.Split(link, ";") {
for part := range strings.SplitSeq(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}
@@ -191,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for _, scope := range strings.Split(scopesString, ",") {
for scope := range strings.SplitSeq(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}

@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
for _, extensionText := range extensionsIn {
for _, extension := range strings.Split(extensionText, ",") {
for extension := range strings.SplitSeq(extensionText, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if extension == "" {
continue
@@ -386,7 +386,6 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)

@@ -520,7 +519,6 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
// merge metadata into request and user metadata
for k, v := range meta {
k, v := k, v
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)
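The deleted `permissionID := permissionID` and `k, v := k, v` copies here (like the `u := u` and `upstream := upstream` deletions earlier in this comparison) were guards against the pre-Go 1.22 semantics in which a single loop variable was shared across all iterations. Since Go 1.22 each iteration declares fresh variables, so the copies are dead code. A minimal illustration:

```go
// Why the `x := x` copies can go: built with a go.mod declaring
// go >= 1.22, each iteration gets its own u, so the goroutines print
// a, b and c exactly once each (order aside). Before 1.22 this would
// likely print the last value three times unless preceded by `u := u`.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, u := range []string{"a", "b", "c"} {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(u) // captures this iteration's u
		}()
	}
	wg.Wait()
}
```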
@@ -8,7 +8,7 @@ type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
FldID interface{} `json:"fld_id"`
FldID any `json:"fld_id"`
} `json:"result"`
}
@@ -14,7 +14,7 @@ import (
)

// errFileNotFound represent file not found error
var errFileNotFound error = errors.New("file not found")
var errFileNotFound = errors.New("file not found")

// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
@@ -283,6 +283,7 @@ type Fs struct {
user string
pass string
dialAddr string
tlsConf *tls.Config // default TLS client config
poolMu sync.Mutex
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections

@@ -408,9 +409,14 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
if f.tlsConf != nil {
tlsConfig = f.tlsConf.Clone()
} else {
tlsConfig = new(tls.Config)
}
tlsConfig.ServerName = f.opt.Host
if f.opt.SkipVerifyTLSCert {
tlsConfig.InsecureSkipVerify = true
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)

@@ -671,6 +677,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
tlsConf: fshttp.NewTransport(ctx).TLSClientConfig,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
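This ftp change seeds the dialer's TLS configuration from rclone's shared HTTP transport instead of building one from scratch, cloning it before applying per-connection tweaks so the shared config is never mutated. A sketch of that clone-then-customize pattern; `clientTLSConfig` and its parameters are hypothetical, but `tls.Config.Clone` and `tls.NewLRUClientSessionCache` are the standard-library calls used in the diff:

```go
// Clone-then-customize: copy a shared base *tls.Config, then adjust
// the copy for this connection.
package example

import "crypto/tls"

func clientTLSConfig(base *tls.Config, host string, insecure bool, cacheSize int) *tls.Config {
	var cfg *tls.Config
	if base != nil {
		cfg = base.Clone() // copies the config so the base stays untouched
	} else {
		cfg = new(tls.Config)
	}
	cfg.ServerName = host
	if insecure {
		cfg.InsecureSkipVerify = true
	}
	if cacheSize > 0 {
		// Reusing TLS sessions avoids a full handshake per data connection.
		cfg.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize)
	}
	return cfg
}
```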
@@ -252,6 +252,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-east4",
Help: "Northern Virginia",
}, {
Value: "us-east5",
Help: "Ohio",
}, {
Value: "us-west1",
Help: "Oregon",

@@ -760,7 +763,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)

@@ -768,16 +771,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
})
if err != nil {
return nil, err
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, err
return err
}

// listBuckets lists the buckets

@@ -820,14 +823,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return list.Flush()
}

// ListR lists the objects and directories of the Fs starting

@@ -1462,6 +1497,7 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)
@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", err
}
bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
}

// Copy src to this remote using server-side copy operations.

@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
"x-archive-auto-make-bucket": "1",
"x-archive-queue-derive": "0",
"x-archive-keep-old-version": "0",
"x-amz-copy-source": quotePath(path.Join("/", srcBucket, srcPath)),
"x-amz-copy-source": rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
"x-amz-metadata-directive": "COPY",
"x-archive-filemeta-sha1": srcObj.sha1,
"x-archive-filemeta-md5": srcObj.md5,

@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{
Method: "GET",
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
Path: path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
Options: optionsFixed,
}
err = o.fs.pacer.Call(func() (bool, error) {

@@ -1334,16 +1334,6 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}

// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
for _, v := range seg {
newValues = append(newValues, url.QueryEscape(v))
}
return strings.Join(newValues, "/")
}

var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
@@ -17,6 +17,7 @@ import (
 	"net/url"
 	"os"
 	"path"
+	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -59,31 +60,43 @@ const (
 	configVersion = 1
 
 	defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
-	defaultClientID = "jottacli"
+	defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices"
 
 	legacyTokenURL              = "https://api.jottacloud.com/auth/v1/token"
 	legacyRegisterURL           = "https://api.jottacloud.com/auth/v1/register"
 	legacyClientID              = "nibfk8biu12ju7hpqomr8b1e40"
 	legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
 	legacyConfigVersion         = 0
-
-	teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
-	teliaseCloudAuthURL  = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
-	teliaseCloudClientID = "desktop"
-
-	telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
-	telianoCloudAuthURL  = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
-	telianoCloudClientID = "desktop"
-
-	tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
-	tele2CloudAuthURL  = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
-	tele2CloudClientID = "desktop"
-
-	onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
-	onlimeCloudAuthURL  = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
-	onlimeCloudClientID = "desktop"
 )
+
+type service struct {
+	key      string
+	name     string
+	domain   string
+	realm    string
+	clientID string
+	scopes   []string
+}
+
+// The list of services and their settings for supporting traditional OAuth.
+// Please keep these in alphabetical order, but with jottacloud first.
+func getServices() []service {
+	return []service{
+		{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
+		{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
+		{"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+	}
+}
 
 // Register with Fs
 func init() {
 	// needs to be done early so we can use oauth during config
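The service table above replaces the per-provider endpoint constants removed later in this diff: each provider's OAuth endpoints are now derived from its domain and realm via OIDC discovery. A small sketch of that derivation — the URL shape is taken from the traditional_type case below, and the printed endpoint matches the removed teliaseCloudTokenURL constant:

package main

import "fmt"

func main() {
	// Values from the telia_se entry in getServices above.
	domain, realm := "cloud-auth.telia.se", "telia_se"
	wellKnown := "https://" + domain + "/auth/realms/" + realm + "/.well-known/openid-configuration"
	fmt.Println(wellKnown)
	// The JSON served there advertises token_endpoint, e.g.
	// https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token,
	// which is exactly the constant this change removes.
}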
@@ -159,36 +172,44 @@ func init() {
 }
 
 // Config runs the backend configuration protocol
-func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
-	switch config.State {
+func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
+	switch conf.State {
 	case "":
-		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
+		if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" {
+			return nil, errors.New("not supported by this backend")
+		}
+		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
 			Value: "standard",
-			Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
+			Help: `Standard authentication.
+This is primarily supported by the official service, but may also be
+supported by some white-label services. It is designed for command-line
+applications, and you will be asked to enter a single-use personal login
+token which you must manually generate from the account security settings
+in the web interface of your service.`,
+		}, {
+			Value: "traditional",
+			Help: `Traditional authentication.
+This is supported by the official service and all white-label services
+that rclone knows about. You will be asked which service to connect to.
+It has a limitation of only a single active authentication at a time. You
+need to be on, or have access to, a machine with an internet-connected
+web browser.`,
 		}, {
 			Value: "legacy",
-			Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
-		}, {
-			Value: "telia_se",
-			Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
-		}, {
-			Value: "telia_no",
-			Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
-		}, {
-			Value: "tele2",
-			Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
-		}, {
-			Value: "onlime",
-			Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
+			Help: `Legacy authentication.
+This is no longer supported by any known services and not recommended
+used. You will be asked for your account's username and password.`,
 		}})
 	case "auth_type_done":
 		// Jump to next state according to config chosen
-		return fs.ConfigGoto(config.Result)
+		return fs.ConfigGoto(conf.Result)
 	case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
 		m.Set("configVersion", fmt.Sprint(configVersion))
-		return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
+		return fs.ConfigInput("standard_token", "config_login_token", `Personal login token.
+Generate it from the account security settings in the web interface of your
+service, for the official service on https://www.jottacloud.com/web/secure.`)
 	case "standard_token":
-		loginToken := config.Result
+		loginToken := conf.Result
 		m.Set(configClientID, defaultClientID)
 		m.Set(configClientSecret, "")
@@ -203,10 +224,50 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
+	case "traditional":
+		services := getServices()
+		options := make([]fs.OptionExample, 0, len(services))
+		for _, service := range services {
+			options = append(options, fs.OptionExample{
+				Value: service.key,
+				Help:  service.name,
+			})
+		}
+		return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional",
+			"White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.",
+			options)
+	case "traditional_type":
+		services := getServices()
+		i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result })
+		if i == -1 {
+			return nil, fmt.Errorf("unexpected service %q", conf.Result)
+		}
+		service := services[i]
+		opts := rest.Opts{
+			Method:  "GET",
+			RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration",
+		}
+		var wellKnown api.WellKnown
+		srv := rest.NewClient(fshttp.NewClient(ctx))
+		_, err := srv.CallJSON(ctx, &opts, nil, &wellKnown)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err)
+		}
+		m.Set("configVersion", fmt.Sprint(configVersion))
+		m.Set(configClientID, service.clientID)
+		m.Set(configTokenURL, wellKnown.TokenEndpoint)
+		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+			OAuth2Config: &oauthutil.Config{
+				AuthURL:     wellKnown.AuthorizationEndpoint,
+				TokenURL:    wellKnown.TokenEndpoint,
+				ClientID:    service.clientID,
+				Scopes:      service.scopes,
+				RedirectURL: oauthutil.RedirectLocalhostURL,
+			},
+		})
 	case "legacy": // configure a jottacloud backend using legacy authentication
 		m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
 		return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
 
 Rclone has it's own Jottacloud API KEY which works fine as long as one
 only uses rclone on a single machine. When you want to use rclone with
 this account on more than one machine it's recommended to create a
@@ -214,7 +275,7 @@ machine specific API key. These keys can NOT be shared between
 machines.`)
 	case "legacy_api":
 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		if config.Result == "true" {
+		if conf.Result == "true" {
 			deviceRegistration, err := registerDevice(ctx, srv)
 			if err != nil {
 				return nil, fmt.Errorf("failed to register device: %w", err)
@@ -223,16 +284,16 @@ machines.`)
 			m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
 			fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
 		}
-		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
+		return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.")
 	case "legacy_username":
-		m.Set(configUsername, config.Result)
-		return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
+		m.Set(configUsername, conf.Result)
+		return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. This is only used in setup, it will not be stored.")
 	case "legacy_password":
-		m.Set("password", config.Result)
+		m.Set("password", conf.Result)
 		m.Set("auth_code", "")
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_auth_code":
-		authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
+		authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
 		m.Set("auth_code", authCode)
 		return fs.ConfigGoto("legacy_do_auth")
 	case "legacy_do_auth":
@@ -242,12 +303,12 @@ machines.`)
 		authCode, _ := m.Get("auth_code")
 
 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		clientID, ok := m.Get(configClientID)
-		if !ok {
+		clientID, _ := m.Get(configClientID)
+		if clientID == "" {
 			clientID = legacyClientID
 		}
-		clientSecret, ok := m.Get(configClientSecret)
-		if !ok {
+		clientSecret, _ := m.Get(configClientSecret)
+		if clientSecret == "" {
 			clientSecret = legacyEncryptedClientSecret
 		}
@@ -260,7 +321,7 @@ machines.`)
 		}
 		token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
 		if err == errAuthCodeRequired {
-			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
+			return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
 		}
 		m.Set("password", "")
 		m.Set("auth_code", "")
@@ -272,58 +333,6 @@ machines.`)
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
 		return fs.ConfigGoto("choose_device")
-	case "telia_se": // telia_se cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, teliaseCloudClientID)
-		m.Set(configTokenURL, teliaseCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     teliaseCloudAuthURL,
-				TokenURL:    teliaseCloudTokenURL,
-				ClientID:    teliaseCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "telia_no": // telia_no cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, telianoCloudClientID)
-		m.Set(configTokenURL, telianoCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     telianoCloudAuthURL,
-				TokenURL:    telianoCloudTokenURL,
-				ClientID:    telianoCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "tele2": // tele2 cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, tele2CloudClientID)
-		m.Set(configTokenURL, tele2CloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     tele2CloudAuthURL,
-				TokenURL:    tele2CloudTokenURL,
-				ClientID:    tele2CloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
-	case "onlime": // onlime cloud config
-		m.Set("configVersion", fmt.Sprint(configVersion))
-		m.Set(configClientID, onlimeCloudClientID)
-		m.Set(configTokenURL, onlimeCloudTokenURL)
-		return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
-			OAuth2Config: &oauthutil.Config{
-				AuthURL:     onlimeCloudAuthURL,
-				TokenURL:    onlimeCloudTokenURL,
-				ClientID:    onlimeCloudClientID,
-				Scopes:      []string{"openid", "jotta-default", "offline_access"},
-				RedirectURL: oauthutil.RedirectLocalhostURL,
-			},
-		})
 	case "choose_device":
 		return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
 Choosing no, the default, will let you access the storage used for the archive
@@ -331,7 +340,7 @@ section of the official Jottacloud client. If you instead want to access the
 sync or the backup section, for example, you must choose yes.`)
 	case "choose_device_query":
-		if config.Result != "true" {
+		if conf.Result != "true" {
 			m.Set(configDevice, "")
 			m.Set(configMountpoint, "")
 			return fs.ConfigGoto("end")
@@ -372,7 +381,7 @@ a new by entering a unique name.`, defaultDevice)
 			return deviceNames[i], ""
 		})
 	case "choose_device_result":
-		device := config.Result
+		device := conf.Result
 
 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
@@ -432,7 +441,7 @@ You may create a new by entering a unique name.`, device)
 			return dev.MountPoints[i].Name, ""
 		})
 	case "choose_device_mountpoint":
-		mountpoint := config.Result
+		mountpoint := conf.Result
 
 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
@@ -463,7 +472,7 @@ You may create a new by entering a unique name.`, device)
 
 		if isNew {
 			if device == defaultDevice {
-				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
+				return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice)
 			}
 			fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
 			_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
@@ -478,7 +487,7 @@ You may create a new by entering a unique name.`, device)
 		// All the config flows end up here in case we need to carry on with something
 		return nil, nil
 	}
-	return nil, fmt.Errorf("unknown state %q", config.State)
+	return nil, fmt.Errorf("unknown state %q", conf.State)
 }
 
 // Options defines the configuration for this backend
@@ -929,12 +938,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
 			oauthConfig.AuthURL = tokenURL
 		}
 	} else if ver == legacyConfigVersion {
-		clientID, ok := m.Get(configClientID)
-		if !ok {
+		clientID, _ := m.Get(configClientID)
+		if clientID == "" {
 			clientID = legacyClientID
 		}
-		clientSecret, ok := m.Get(configClientSecret)
-		if !ok {
+		clientSecret, _ := m.Get(configClientSecret)
+		if clientSecret == "" {
 			clientSecret = legacyEncryptedClientSecret
 		}
 		oauthConfig.ClientID = clientID
@@ -1000,6 +1009,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.features.ListR = nil
 	}
 
+	cust, err := getCustomerInfo(ctx, f.apiSrv)
+	if err != nil {
+		return nil, err
+	}
+	f.user = cust.Username
+	f.setEndpoints()
+
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
 		_, err := f.readMetaDataForPath(ctx, "")
@@ -1009,13 +1025,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return err
 	})
 
-	cust, err := getCustomerInfo(ctx, f.apiSrv)
-	if err != nil {
-		return nil, err
-	}
-	f.user = cust.Username
-	f.setEndpoints()
-
 	if root != "" && !rootIsDir {
 		// Check to see if the root actually an existing file
 		remote := path.Base(root)
@@ -1,4 +1,4 @@
-//go:build windows || plan9 || js || wasm || linux
+//go:build windows || plan9 || js || linux
 
 package local

@@ -1,4 +1,4 @@
-//go:build !windows && !plan9 && !js && !wasm && !linux
+//go:build !windows && !plan9 && !js && !linux
 
 package local

@@ -1,4 +1,4 @@
-//go:build plan9 || js || wasm
+//go:build plan9 || js
 
 package local

@@ -1,4 +1,4 @@
-//go:build !windows && !plan9 && !js && !wasm
+//go:build !windows && !plan9 && !js
 
 package local
@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	iofs "io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -841,7 +842,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	} else if !fi.IsDir() {
 		return fs.ErrorIsFile
 	}
-	return os.Remove(localPath)
+	err := os.Remove(localPath)
+	if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
+		if os.Chmod(localPath, 0o600) == nil {
+			err = os.Remove(localPath)
+		}
+	}
+	return err
 }
 
 // Precision of the file system
@@ -334,7 +334,7 @@ func TestMetadata(t *testing.T) {
 
 func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
 	ctx := context.Background()
-	whenRFC := when.Format(time.RFC3339Nano)
+	whenRFC := when.Local().Format(time.RFC3339Nano)
 	const dayLength = len("2001-01-01")
 
 	f := r.Flocal.(*Fs)
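Why adding .Local() changes the test above: RFC3339 output embeds a UTC offset, so the same instant formats differently depending on the time.Time's location, even though the instants compare equal. A self-contained illustration (Europe/Oslo is an arbitrary example zone):

package main

import (
	"fmt"
	"time"
)

func main() {
	loc, err := time.LoadLocation("Europe/Oslo") // arbitrary example zone
	if err != nil {
		panic(err)
	}
	when := time.Date(2001, 1, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(when.Format(time.RFC3339Nano))         // 2001-01-01T12:00:00Z
	fmt.Println(when.In(loc).Format(time.RFC3339Nano)) // 2001-01-01T13:00:00+01:00
	fmt.Println(when.Equal(when.In(loc)))              // true - same instant
}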
backend/local/local_internal_windows_test.go (new file, 40 lines)
@@ -0,0 +1,40 @@
+//go:build windows
+
+package local
+
+import (
+	"context"
+	"path/filepath"
+	"runtime"
+	"syscall"
+	"testing"
+
+	"github.com/rclone/rclone/fs/operations"
+	"github.com/rclone/rclone/fstest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
+// Microsoft docs indicate that "This attribute is not honored on directories."
+// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
+// and https://github.com/golang/go/issues/26295
+func TestRmdirWindows(t *testing.T) {
+	if runtime.GOOS != "windows" {
+		t.Skipf("windows only")
+	}
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+
+	err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
+	require.NoError(t, err)
+
+	ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
+	require.NoError(t, err)
+
+	err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
+	require.NoError(t, err)
+
+	err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
+	assert.NoError(t, err)
+}
@@ -1,4 +1,4 @@
-//go:build dragonfly || plan9 || js || wasm
+//go:build dragonfly || plan9 || js
 
 package local

@@ -1,4 +1,4 @@
-//go:build !windows && !plan9 && !js && !wasm
+//go:build !windows && !plan9 && !js
 
 package local

@@ -1,4 +1,4 @@
-//go:build windows || plan9 || js || wasm
+//go:build windows || plan9 || js
 
 package local
@@ -400,7 +400,7 @@ type quirks struct {
 }
 
 func (q *quirks) parseQuirks(option string) {
-	for _, flag := range strings.Split(option, ",") {
+	for flag := range strings.SplitSeq(option, ",") {
 		switch strings.ToLower(strings.TrimSpace(flag)) {
 		case "binlist":
 			// The official client sometimes uses a so called "bin" protocol,
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	f.speedupAny = false
 	uniqueValidPatterns := make(map[string]any)
 
-	for _, pattern := range strings.Split(patternString, ",") {
+	for pattern := range strings.SplitSeq(patternString, ",") {
 		pattern = strings.ToLower(strings.TrimSpace(pattern))
 		if pattern == "" {
 			continue
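strings.SplitSeq (new in Go 1.24) yields the substrings one at a time through an iterator instead of building a []string first, which is why the index variable disappears in the loops above. A minimal demonstration with made-up input:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Iterate over comma-separated parts without allocating a slice.
	for part := range strings.SplitSeq("a, b ,c", ",") {
		fmt.Printf("%q\n", strings.TrimSpace(part)) // "a", "b", "c"
	}
}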
@@ -325,13 +325,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 }
 
 // listDir lists the bucket to the entries
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	// List the objects and directories
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
-		entries = append(entries, entry)
-		return nil
+		return callback(entry)
 	})
-	return entries, err
+	return err
 }
 
 // listBuckets lists the buckets to entries
@@ -354,15 +353,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucket, directory := f.split(dir)
 	if bucket == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
-		return f.listBuckets(ctx)
 	}
-	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -629,6 +659,7 @@ var (
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
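A hedged sketch of how a caller drives the new ListP method: the callback receives entries in tranches, and returning a non-nil error stops the listing early. countEntries is a hypothetical helper; the interface and callback types are the rclone ones referenced in the diff above.

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// countEntries tallies directory entries as they stream in, without
// holding the whole listing in memory.
func countEntries(ctx context.Context, f fs.ListPer, dir string) (n int, err error) {
	err = f.ListP(ctx, dir, func(entries fs.DirEntries) error {
		n += len(entries)
		return nil // returning an error here would stop the listing early
	})
	return n, err
}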
@@ -243,7 +243,6 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
 func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
 	numSet = 0
 	for k, v := range metadata {
-		k, v := k, v
 		switch k {
 		case "mtime":
 			t, err := time.Parse(timeFormatIn, v)
@@ -422,12 +421,7 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
 		if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
 			return true
 		}
-		for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
-			if hasUserIdentity(identity) {
-				return true
-			}
-		}
-		return false
+		return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
 	}
 	// Put Permissions with a user first, leaving unsorted otherwise
 	slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
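Two modern-Go simplifications are at work above: since Go 1.22 range variables are per-iteration, so the removed "k, v := k, v" copies were already redundant; and slices.ContainsFunc collapses the hand-written any-match loop. A small illustration with arbitrary data:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	words := []string{"alpha", "beta", "gamma"}
	// Equivalent to looping over words and returning true on the first match.
	found := slices.ContainsFunc(words, func(s string) bool {
		return strings.HasPrefix(s, "b")
	})
	fmt.Println(found) // true
}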
@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
 	require.NoError(b, err)
 	require.Equal(b, len(buf), n)
 	h := New()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+
+	for b.Loop() {
 		h.Reset()
 		h.Write(buf)
 		h.Sum(nil)
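A hedged sketch of the testing.B.Loop pattern adopted above (Go 1.24+): setup before the loop is excluded from the measurement automatically, which is why the explicit b.ResetTimer() call could be dropped.

package bench

import (
	"crypto/sha256"
	"testing"
)

func BenchmarkSum(b *testing.B) {
	buf := make([]byte, 1024) // setup: not timed when using b.Loop
	for b.Loop() {
		_ = sha256.Sum256(buf) // arbitrary stand-in workload
	}
}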
@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 // Package oracleobjectstorage provides an interface to the OCI object storage system.
 package oracleobjectstorage
@@ -254,15 +254,47 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	bucketName, directory := f.split(dir)
 	fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
 	if bucketName == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
+		entries, err := f.listBuckets(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
+		if err != nil {
+			return err
+		}
-		return f.listBuckets(ctx)
 	}
-	return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
+	return list.Flush()
 }
 
 // listFn is called from list to handle an object.
@@ -411,24 +443,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectst
 }
 
 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
 	fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 		if err != nil {
 			return err
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			return callback(entry)
 		}
 		return nil
 	}
 	err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// bucket must be present if listing succeeded
 	f.cache.MarkOK(bucket)
-	return entries, nil
+	return nil
 }
 
 // listBuckets returns all the buckets to out
@@ -765,6 +797,7 @@ var (
 	_ fs.Copier          = &Fs{}
 	_ fs.PutStreamer     = &Fs{}
 	_ fs.ListRer         = &Fs{}
+	_ fs.ListPer         = &Fs{}
 	_ fs.Commander       = &Fs{}
 	_ fs.CleanUpper      = &Fs{}
 	_ fs.OpenChunkWriter = &Fs{}
@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage

@@ -1,7 +1,7 @@
 // Build for oracleobjectstorage for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-//go:build plan9 || solaris || js || wasm
+//go:build plan9 || solaris || js
 
 // Package oracleobjectstorage provides an interface to the OCI object storage system.
 package oracleobjectstorage

@@ -1,4 +1,4 @@
-//go:build !plan9 && !solaris && !js && !wasm
+//go:build !plan9 && !solaris && !js
 
 package oracleobjectstorage
@@ -5,6 +5,7 @@ package api
 
 import (
 	"fmt"
+	"net/url"
 	"reflect"
 	"strconv"
 	"time"
@@ -136,8 +137,25 @@ type Link struct {
 }
 
 // Valid reports whether l is non-nil, has an URL, and is not expired.
+// It primarily checks the URL's expire query parameter, falling back to the Expire field.
 func (l *Link) Valid() bool {
-	return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
+	if l == nil || l.URL == "" {
+		return false
+	}
+
+	// Primary validation: check URL's expire query parameter
+	if u, err := url.Parse(l.URL); err == nil {
+		if expireStr := u.Query().Get("expire"); expireStr != "" {
+			// Try parsing as Unix timestamp (seconds)
+			if expireInt, err := strconv.ParseInt(expireStr, 10, 64); err == nil {
+				expireTime := time.Unix(expireInt, 0)
+				return time.Now().Add(10 * time.Second).Before(expireTime)
+			}
+		}
+	}
+
+	// Fallback validation: use the Expire field if URL parsing didn't work
+	return time.Now().Add(10 * time.Second).Before(time.Time(l.Expire))
 }
 
 // URL is a basic form of URL
backend/pikpak/api/types_test.go (new file, 99 lines)
@@ -0,0 +1,99 @@
+package api
+
+import (
+	"fmt"
+	"testing"
+	"time"
+)
+
+// TestLinkValid tests the Link.Valid method for various scenarios
+func TestLinkValid(t *testing.T) {
+	tests := []struct {
+		name     string
+		link     *Link
+		expected bool
+		desc     string
+	}{
+		{
+			name:     "nil link",
+			link:     nil,
+			expected: false,
+			desc:     "nil link should be invalid",
+		},
+		{
+			name:     "empty URL",
+			link:     &Link{URL: ""},
+			expected: false,
+			desc:     "empty URL should be invalid",
+		},
+		{
+			name: "valid URL with future expire parameter",
+			link: &Link{
+				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
+			},
+			expected: true,
+			desc:     "URL with future expire parameter should be valid",
+		},
+		{
+			name: "expired URL with past expire parameter",
+			link: &Link{
+				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
+			},
+			expected: false,
+			desc:     "URL with past expire parameter should be invalid",
+		},
+		{
+			name: "URL expire parameter takes precedence over Expire field",
+			link: &Link{
+				URL:    fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
+				Expire: Time(time.Now().Add(-time.Hour)), // Fallback is expired
+			},
+			expected: true,
+			desc:     "URL expire parameter should take precedence over Expire field",
+		},
+		{
+			name: "URL expire parameter within 10 second buffer should be invalid",
+			link: &Link{
+				URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(5*time.Second).Unix()),
+			},
+			expected: false,
+			desc:     "URL expire parameter within 10 second buffer should be invalid",
+		},
+		{
+			name: "fallback to Expire field when no URL expire parameter",
+			link: &Link{
+				URL:    "https://example.com/file",
+				Expire: Time(time.Now().Add(time.Hour)),
+			},
+			expected: true,
+			desc:     "should fallback to Expire field when URL has no expire parameter",
+		},
+		{
+			name: "fallback to Expire field when URL expire parameter is invalid",
+			link: &Link{
+				URL:    "https://example.com/file?expire=invalid",
+				Expire: Time(time.Now().Add(time.Hour)),
+			},
+			expected: true,
+			desc:     "should fallback to Expire field when URL expire parameter is unparseable",
+		},
+		{
+			name: "invalid when both URL expire and Expire field are expired",
+			link: &Link{
+				URL:    fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
+				Expire: Time(time.Now().Add(-time.Hour)),
+			},
+			expected: false,
+			desc:     "should be invalid when both URL expire and Expire field are expired",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := tt.link.Valid()
+			if result != tt.expected {
+				t.Errorf("Link.Valid() = %v, expected %v. %s", result, tt.expected, tt.desc)
+			}
+		})
+	}
+}
@@ -1,5 +1,3 @@
-//go:build !wasm
-
 // Package protondrive implements the Proton Drive backend
 package protondrive
 
@@ -15,6 +13,8 @@ import (
 	protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
 	"github.com/henrybear327/go-proton-api"
+
+	"github.com/pquerna/otp/totp"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -89,6 +89,17 @@ The value can also be provided with --protondrive-2fa=000000
 
 The 2FA code of your proton drive account if the account is set up with
 two-factor authentication`,
 			Required: false,
+		}, {
+			Name: "otp_secret_key",
+			Help: `The OTP secret key
+
+The value can also be provided with --protondrive-otp-secret-key=ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
+
+The OTP secret key of your proton drive account if the account is set up with
+two-factor authentication`,
+			Required:   false,
+			Sensitive:  true,
+			IsPassword: true,
 		}, {
 			Name: clientUIDKey,
 			Help: "Client uid key (internal use only)",
@@ -193,6 +204,7 @@ type Options struct {
 	Password        string `config:"password"`
 	MailboxPassword string `config:"mailbox_password"`
 	TwoFA           string `config:"2fa"`
+	OtpSecretKey    string `config:"otp_secret_key"`
 
 	// advanced
 	Enc encoder.MultiEncoder `config:"encoding"`
@@ -358,7 +370,15 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
 	config.FirstLoginCredential.Username = opt.Username
 	config.FirstLoginCredential.Password = opt.Password
 	config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
+	// if 2FA code is provided, use it; otherwise, generate one using the OTP secret key if provided
 	config.FirstLoginCredential.TwoFA = opt.TwoFA
+	if opt.TwoFA == "" && opt.OtpSecretKey != "" {
+		code, err := totp.GenerateCode(opt.OtpSecretKey, time.Now())
+		if err != nil {
+			return nil, fmt.Errorf("couldn't generate 2FA code: %w", err)
+		}
+		config.FirstLoginCredential.TwoFA = code
+	}
 	protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err)
@@ -397,6 +417,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 	}
 
+	if opt.OtpSecretKey != "" {
+		var err error
+		opt.OtpSecretKey, err = obscure.Reveal(opt.OtpSecretKey)
+		if err != nil {
+			return nil, fmt.Errorf("couldn't decrypt OtpSecretKey: %w", err)
+		}
+	}
+
 	ci := fs.GetConfig(ctx)
 
 	root = strings.Trim(root, "/")

@@ -1,5 +1,3 @@
-//go:build !wasm
-
 package protondrive_test
 
 import (

@@ -1,7 +0,0 @@
-// Build for sftp for unsupported platforms to stop go complaining
-// about "no buildable Go source files "
-
-//go:build wasm
-
-// Package protondrive implements the Proton Drive backend
-package protondrive
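What the new otp_secret_key option automates: deriving the current time-based one-time code from the shared secret, just as an authenticator app would. A small sketch using the same pquerna/otp call as the diff above; the secret is the example value from the option's help text, not a real account:

package main

import (
	"fmt"
	"time"

	"github.com/pquerna/otp/totp"
)

func main() {
	secret := "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" // example secret from the help text
	code, err := totp.GenerateCode(secret, time.Now())
	if err != nil {
		panic(err)
	}
	fmt.Println(code) // six digits, rotating every 30 seconds
}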
@@ -1,4 +1,4 @@
-//go:build !plan9 && !js && !wasm
+//go:build !plan9 && !js
 
 // Package qingstor provides an interface to QingStor object storage
 // Home: https://www.qingcloud.com/

@@ -1,6 +1,6 @@
 // Test QingStor filesystem interface
 
-//go:build !plan9 && !js && !wasm
+//go:build !plan9 && !js
 
 package qingstor

@@ -1,7 +1,7 @@
 // Build for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-//go:build plan9 || js || wasm
+//go:build plan9 || js
 
 // Package qingstor provides an interface to QingStor object storage
 // Home: https://www.qingcloud.com/

@@ -1,6 +1,6 @@
 // Upload object to QingStor
 
-//go:build !plan9 && !js && !wasm
+//go:build !plan9 && !js
 
 package qingstor
@@ -59,11 +59,7 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
 
 	defer func() { u.fileUsage[fileID] = borrowed }()
 
-	effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
-
-	if neededMemory < effectiveChunkSize {
-		effectiveChunkSize = neededMemory
-	}
+	effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))
 
 	if effectiveChunkSize <= u.reserved {
 		return effectiveChunkSize
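The rewritten expression above is the clamp idiom built from Go 1.21's min and max builtins: keep the computed chunk size at least the reserved amount but never above the needed memory. In isolation:

package main

import "fmt"

// clamp bounds x to the inclusive range [lo, hi] (when lo <= hi).
func clamp(x, lo, hi int64) int64 {
	return min(hi, max(x, lo))
}

func main() {
	fmt.Println(clamp(5, 10, 100))   // 10  - raised to the lower bound
	fmt.Println(clamp(50, 10, 100))  // 50  - already in range
	fmt.Println(clamp(500, 10, 100)) // 100 - capped at the upper bound
}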
backend/s3/s3.go (1142 lines changed): diff suppressed because it is too large.
@@ -1,4 +1,4 @@
-//go:build !plan9 && !wasm
+//go:build !plan9
 
 // Package sftp provides a filesystem interface using github.com/pkg/sftp
 package sftp

@@ -1,4 +1,4 @@
-//go:build !plan9 && !wasm
+//go:build !plan9
 
 package sftp

@@ -1,6 +1,6 @@
 // Test Sftp filesystem interface
 
-//go:build !plan9 && !wasm
+//go:build !plan9
 
 package sftp_test

@@ -1,7 +1,7 @@
 // Build for sftp for unsupported platforms to stop go complaining
 // about "no buildable Go source files "
 
-//go:build plan9 || wasm
+//go:build plan9
 
 // Package sftp provides a filesystem interface using github.com/pkg/sftp
 package sftp

@@ -1,4 +1,4 @@
-//go:build !plan9 && !wasm
+//go:build !plan9
 
 package sftp

@@ -1,4 +1,4 @@
-//go:build !plan9 && !wasm
+//go:build !plan9
 
 package sftp

@@ -1,4 +1,4 @@
-//go:build !plan9 && !wasm
+//go:build !plan9
 
 package sftp

@@ -1,4 +1,4 @@
-//go:build !plan9 && !wasm
+//go:build !plan9
 
 package sftp

@@ -1,4 +1,4 @@
-//go:build !plan9 && !wasm
+//go:build !plan9
 
 package sftp
@@ -200,7 +200,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 	pool := newFilePool(ctx, fs, "testshare", "/test/path")
 
 	const numGoroutines = 10
-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		mockFile := newMockFile()
 		pool.pool = append(pool.pool, mockFile)
 	}
@@ -208,7 +208,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 	// Test concurrent get operations
 	done := make(chan bool, numGoroutines)
 
-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		go func() {
 			defer func() { done <- true }()
@@ -219,7 +219,7 @@ func TestFilePool_ConcurrentAccess(t *testing.T) {
 		}()
 	}
 
-	for i := 0; i < numGoroutines; i++ {
+	for range numGoroutines {
 		<-done
 	}
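"for range n" over an integer (Go 1.22+) runs the body exactly n times without declaring an unused index, which is all the three loops above needed. A compact standalone version of the same fan-out/fan-in shape:

package main

import "fmt"

func main() {
	const numGoroutines = 10
	done := make(chan bool, numGoroutines)
	for range numGoroutines {
		go func() { done <- true }()
	}
	for range numGoroutines {
		<-done // wait for every goroutine to report back
	}
	fmt.Println("all done")
}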
@@ -192,6 +192,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
 
+	// if root is empty or ends with / (must be a directory)
+	isRootDir := isPathDir(root)
+
 	root = strings.Trim(root, "/")
 
 	f := &Fs{
@@ -218,6 +221,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if share == "" || dir == "" {
 		return f, nil
 	}
+
+	// Skip stat check if root is already a directory
+	if isRootDir {
+		return f, nil
+	}
 	cn, err := f.getConnection(ctx, share)
 	if err != nil {
 		return nil, err
@@ -894,6 +902,11 @@ func ensureSuffix(s, suffix string) string {
 	return s + suffix
 }
 
+// isPathDir determines if a path represents a directory based on trailing slash
+func isPathDir(path string) bool {
+	return path == "" || strings.HasSuffix(path, "/")
+}
+
 func trimPathPrefix(s, prefix string) string {
 	// we need to clean the paths to make tests pass!
 	s = betterPathClean(s)
backend/smb/smb_internal_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
+// Unit tests for internal SMB functions
+package smb
+
+import "testing"
+
+// TestIsPathDir tests the isPathDir function logic
+func TestIsPathDir(t *testing.T) {
+	tests := []struct {
+		path     string
+		expected bool
+	}{
+		// Empty path should be considered a directory
+		{"", true},
+
+		// Paths with trailing slash should be directories
+		{"/", true},
+		{"share/", true},
+		{"share/dir/", true},
+		{"share/dir/subdir/", true},
+
+		// Paths without trailing slash should not be directories
+		{"share", false},
+		{"share/dir", false},
+		{"share/dir/file", false},
+		{"share/dir/subdir/file", false},
+
+		// Edge cases
+		{"share//", true},
+		{"share///", true},
+		{"share/dir//", true},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.path, func(t *testing.T) {
+			result := isPathDir(tt.path)
+			if result != tt.expected {
+				t.Errorf("isPathDir(%q) = %v, want %v", tt.path, result, tt.expected)
+			}
+		})
+	}
+}
@@ -561,6 +561,21 @@ func (f *Fs) setRoot(root string) {
 	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
 }
 
+// Fetch the base container's policy to be used if/when we need to create a
+// segments container to ensure we use the same policy.
+func (f *Fs) fetchStoragePolicy(ctx context.Context, container string) (fs.Fs, error) {
+	err := f.pacer.Call(func() (bool, error) {
+		var rxHeaders swift.Headers
+		_, rxHeaders, err := f.c.Container(ctx, container)
+
+		f.opt.StoragePolicy = rxHeaders["X-Storage-Policy"]
+		fs.Debugf(f, "Auto set StoragePolicy to %s", f.opt.StoragePolicy)
+
+		return shouldRetryHeaders(ctx, rxHeaders, err)
+	})
+	return nil, err
+}
+
 // NewFsWithConnection constructs an Fs from the path, container:path
 // and authenticated connection.
 //
@@ -590,6 +605,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
 		f.opt.UseSegmentsContainer.Valid = true
 		fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
 	}
+
 	if f.rootContainer != "" && f.rootDirectory != "" {
 		// Check to see if the object exists - ignoring directory markers
 		var info swift.Object
@@ -773,21 +789,20 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
 }
 
 // listDir lists a single directory
-func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
 	if container == "" {
-		return nil, fs.ErrorListBucketRequired
+		return fs.ErrorListBucketRequired
 	}
 	// List the objects
 	err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
-		entries = append(entries, entry)
-		return nil
+		return callback(entry)
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// container must be present if listing succeeded
 	f.cache.MarkOK(container)
-	return entries, nil
+	return nil
 }
 
 // listContainers lists the containers
@@ -818,14 +833,46 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	container, directory := f.split(dir)
 	if container == "" {
 		if directory != "" {
-			return nil, fs.ErrorListBucketRequired
+			return fs.ErrorListBucketRequired
 		}
+		entries, err := f.listContainers(ctx)
+		if err != nil {
+			return err
+		}
+		for _, entry := range entries {
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
+		if err != nil {
+			return err
+		}
-		return f.listContainers(ctx)
 	}
-	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
+	return list.Flush()
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -1101,6 +1148,13 @@ func (f *Fs) newSegmentedUpload(ctx context.Context, dstContainer string, dstPat
 		container: dstContainer,
 	}
 	if f.opt.UseSegmentsContainer.Value {
+		if f.opt.StoragePolicy == "" {
+			_, err = f.fetchStoragePolicy(ctx, dstContainer)
+			if err != nil {
+				return nil, err
+			}
+		}
+
 		su.container += segmentsContainerSuffix
 		err = f.makeContainer(ctx, su.container)
 		if err != nil {
@@ -1650,6 +1704,7 @@ var (
 	_ fs.PutStreamer = &Fs{}
 	_ fs.Copier      = &Fs{}
 	_ fs.ListRer     = &Fs{}
+	_ fs.ListPer     = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 )
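A hedged sketch of the HTTP exchange fetchStoragePolicy performs through the swift library: in the OpenStack Swift API, a HEAD request on a container returns its policy in the X-Storage-Policy response header. The endpoint and token below are placeholders, not a real deployment:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodHead,
		"https://swift.example.com/v1/AUTH_test/mycontainer", nil) // placeholder endpoint
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Auth-Token", "placeholder-token")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Header.Get("X-Storage-Policy")) // e.g. "Policy-1"
}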
@@ -76,6 +76,7 @@ func (f *Fs) testNoChunk(t *testing.T) {
 
 // Additional tests that aren't in the framework
 func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("PolicyDiscovery", f.testPolicyDiscovery)
 	t.Run("NoChunk", f.testNoChunk)
 	t.Run("WithChunk", f.testWithChunk)
 	t.Run("WithChunkFail", f.testWithChunkFail)
@@ -195,4 +196,50 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
 	require.Equal(t, obj.Size(), objTarget.Size())
 }
 
+func (f *Fs) testPolicyDiscovery(t *testing.T) {
+	ctx := context.TODO()
+	container := "testPolicyDiscovery-1"
+	// Reset the policy so we can test if it is populated.
+	f.opt.StoragePolicy = ""
+	err := f.makeContainer(ctx, container)
+	require.NoError(t, err)
+	_, err = f.fetchStoragePolicy(ctx, container)
+	require.NoError(t, err)
+
+	// Default policy for SAIO image is 1replica.
+	assert.Equal(t, "1replica", f.opt.StoragePolicy)
+
+	// Create a container using a non-default policy, and check to ensure
+	// that the created segments container uses the same non-default policy.
+	policy := "Policy-1"
+	container = "testPolicyDiscovery-2"
+
+	f.opt.StoragePolicy = policy
+	err = f.makeContainer(ctx, container)
+	require.NoError(t, err)
+
+	// Reset the policy so we can test if it is populated, and set to the
+	// non-default policy.
+	f.opt.StoragePolicy = ""
+	_, err = f.fetchStoragePolicy(ctx, container)
+	require.NoError(t, err)
+	assert.Equal(t, policy, f.opt.StoragePolicy)
+
+	// Test that when a segmented upload container is made, the newly
+	// created container inherits the non-default policy of the base
+	// container.
+	f.opt.StoragePolicy = ""
+	f.opt.UseSegmentsContainer.Value = true
+	su, err := f.newSegmentedUpload(ctx, container, "")
+	require.NoError(t, err)
+	// The container name we expected?
+	segmentsContainer := container + segmentsContainerSuffix
+	assert.Equal(t, segmentsContainer, su.container)
+	// The policy we expected?
+	f.opt.StoragePolicy = ""
+	_, err = f.fetchStoragePolicy(ctx, su.container)
+	require.NoError(t, err)
+	assert.Equal(t, policy, f.opt.StoragePolicy)
+}
+
 var _ fstests.InternalTester = (*Fs)(nil)
@@ -1,7 +1,7 @@
 // Package common defines code common to the union and the policies
 //
 // These need to be defined in a separate package to avoid import loops
-package common
+package common //nolint:revive // Don't include revive when running golangci-lint because this triggers var-naming: avoid meaningless package names
 
 import "github.com/rclone/rclone/fs"
@@ -21,7 +21,6 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	for _, u := range upstreams {
-		u := u // Closure
 		go func() {
 			rfs := u.RootFs
 			remote := path.Join(u.RootPath, filePath)
@@ -123,7 +123,7 @@ func (p *Prop) Hashes() (hashes map[hash.Type]string) {
 	hashes = make(map[hash.Type]string)
 	for _, checksums := range p.Checksums {
 		checksums = strings.ToLower(checksums)
-		for _, checksum := range strings.Split(checksums, " ") {
+		for checksum := range strings.SplitSeq(checksums, " ") {
 			switch {
 			case strings.HasPrefix(checksum, "sha1:"):
 				hashes[hash.SHA1] = checksum[5:]
@@ -73,8 +73,7 @@ var osarches = []string{
 	"plan9/386",
 	"plan9/amd64",
 	"solaris/amd64",
-	"js/wasm",
-	"wasip1/wasm",
+	// "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed
 }
 
 // Special environment flags for a given arch
119
bin/make-test-certs.sh
Executable file
119
bin/make-test-certs.sh
Executable file
@@ -0,0 +1,119 @@
#!/usr/bin/env bash
set -euo pipefail

# Create test TLS certificates for use with rclone.

OUT_DIR="${OUT_DIR:-./tls-test}"
CA_SUBJ="${CA_SUBJ:-/C=US/ST=Test/L=Test/O=Test Org/OU=Test Unit/CN=Test Root CA}"
SERVER_CN="${SERVER_CN:-localhost}"
CLIENT_CN="${CLIENT_CN:-Test Client}"
CLIENT_KEY_PASS="${CLIENT_KEY_PASS:-testpassword}"

CA_DAYS=${CA_DAYS:-3650}
SERVER_DAYS=${SERVER_DAYS:-825}
CLIENT_DAYS=${CLIENT_DAYS:-825}

mkdir -p "$OUT_DIR"
cd "$OUT_DIR"

# Create OpenSSL config

# CA extensions
cat > ca_openssl.cnf <<'EOF'
[ ca_ext ]
basicConstraints = critical, CA:true, pathlen:1
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
EOF

# Server extensions (SAN includes localhost + loopback IP)
cat > server_openssl.cnf <<EOF
[ server_ext ]
basicConstraints = critical, CA:false
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
subjectAltName = @alt_names

[ alt_names ]
DNS.1 = ${SERVER_CN}
IP.1 = 127.0.0.1
EOF

# Client extensions (for mTLS client auth)
cat > client_openssl.cnf <<'EOF'
[ client_ext ]
basicConstraints = critical, CA:false
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
EOF

echo "Create CA key, CSR, and self-signed CA cert"
if [ ! -f ca.key.pem ]; then
    openssl genrsa -out ca.key.pem 4096
    chmod 600 ca.key.pem
fi

openssl req -new -key ca.key.pem -subj "$CA_SUBJ" -out ca.csr.pem

openssl x509 -req -in ca.csr.pem -signkey ca.key.pem \
    -sha256 -days "$CA_DAYS" \
    -extfile ca_openssl.cnf -extensions ca_ext \
    -out ca.cert.pem

echo "Create server key (NO PASSWORD) and cert signed by CA"
openssl genrsa -out server.key.pem 2048
chmod 600 server.key.pem

openssl req -new -key server.key.pem -subj "/CN=${SERVER_CN}" -out server.csr.pem

openssl x509 -req -in server.csr.pem \
    -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
    -out server.cert.pem -days "$SERVER_DAYS" -sha256 \
    -extfile server_openssl.cnf -extensions server_ext

echo "Create client key (PASSWORD-PROTECTED), CSR, and cert"
openssl genrsa -aes256 -passout pass:"$CLIENT_KEY_PASS" -out client.key.pem 2048
chmod 600 client.key.pem

openssl req -new -key client.key.pem -passin pass:"$CLIENT_KEY_PASS" \
    -subj "/CN=${CLIENT_CN}" -out client.csr.pem

openssl x509 -req -in client.csr.pem \
    -CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
    -out client.cert.pem -days "$CLIENT_DAYS" -sha256 \
    -extfile client_openssl.cnf -extensions client_ext

echo "Verify chain"
openssl verify -CAfile ca.cert.pem server.cert.pem client.cert.pem

echo "Done"

echo
echo "Summary"
echo "-------"
printf "%-22s %s\n" \
    "CA key:" "ca.key.pem" \
    "CA cert:" "ca.cert.pem" \
    "Server key:" "server.key.pem (no password)" \
    "Server CSR:" "server.csr.pem" \
    "Server cert:" "server.cert.pem (SAN: ${SERVER_CN}, 127.0.0.1)" \
    "Client key:" "client.key.pem (encrypted)" \
    "Client CSR:" "client.csr.pem" \
    "Client cert:" "client.cert.pem" \
    "Client key password:" "$CLIENT_KEY_PASS"

echo
echo "Test rclone server"
echo
echo "rclone serve http -vv --addr :8080 --cert ${OUT_DIR}/server.cert.pem --key ${OUT_DIR}/server.key.pem --client-ca ${OUT_DIR}/ca.cert.pem ."

echo
echo "Test rclone client"
echo
echo "rclone lsf :http: --http-url 'https://localhost:8080' --ca-cert ${OUT_DIR}/ca.cert.pem --client-cert ${OUT_DIR}/client.cert.pem --client-key ${OUT_DIR}/client.key.pem --client-pass \$(rclone obscure $CLIENT_KEY_PASS)"
echo
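The script's final echo lines show how to exercise the certificates with rclone itself. For completeness, here is a hedged sketch of a plain Go client doing the same mTLS handshake against the test server. It assumes the default OUT_DIR of ./tls-test and that the client key has first been decrypted (e.g. `openssl rsa -in client.key.pem -out client.key.plain.pem`, a filename of our invention), since Go's `tls.LoadX509KeyPair` does not read password-protected PEM:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Trust only the test CA generated by make-test-certs.sh.
	caPEM, err := os.ReadFile("tls-test/ca.cert.pem")
	if err != nil {
		panic(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		panic("failed to parse CA certificate")
	}

	// Client certificate for mutual TLS. Assumes the key was decrypted
	// beforehand; the plain-key filename is ours, not the script's.
	cert, err := tls.LoadX509KeyPair("tls-test/client.cert.pem", "tls-test/client.key.plain.pem")
	if err != nil {
		panic(err)
	}

	client := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				RootCAs:      pool,
				Certificates: []tls.Certificate{cert},
			},
		},
	}

	// The server SAN covers localhost and 127.0.0.1, so this verifies.
	resp, err := client.Get("https://localhost:8080/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")
}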
159 bin/make_bisync_docs.go Normal file
@@ -0,0 +1,159 @@
//go:build ignore

package main

import (
	"bytes"
	"cmp"
	"context"
	"encoding/json"
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"slices"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest/runs"
	"github.com/stretchr/testify/assert/yaml"
)

var path = flag.String("path", "./docs/content/", "root path")

const (
	configFile              = "fstest/test_all/config.yaml"
	startListIgnores        = "<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
	endListIgnores          = "<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
	startListFailures       = "<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
	endListFailures         = "<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
	integrationTestsJSONURL = "https://pub.rclone.org/integration-tests/current/index.json"
	integrationTestsHTMLURL = "https://pub.rclone.org/integration-tests/current/"
)

func main() {
	err := replaceBetween(*path, startListIgnores, endListIgnores, getIgnores)
	if err != nil {
		fs.Errorf(*path, "error replacing ignores: %v", err)
	}
	err = replaceBetween(*path, startListFailures, endListFailures, getFailures)
	if err != nil {
		fs.Errorf(*path, "error replacing failures: %v", err)
	}
}

// replaceBetween replaces the text between startSep and endSep with fn()
func replaceBetween(path, startSep, endSep string, fn func() (string, error)) error {
	b, err := os.ReadFile(filepath.Join(path, "bisync.md"))
	if err != nil {
		return err
	}
	doc := string(b)

	before, after, found := strings.Cut(doc, startSep)
	if !found {
		return fmt.Errorf("could not find: %v", startSep)
	}
	_, after, found = strings.Cut(after, endSep)
	if !found {
		return fmt.Errorf("could not find: %v", endSep)
	}

	replaceSection, err := fn()
	if err != nil {
		return err
	}

	newDoc := before + startSep + "\n" + strings.TrimSpace(replaceSection) + "\n" + endSep + after

	err = os.WriteFile(filepath.Join(path, "bisync.md"), []byte(newDoc), 0777)
	if err != nil {
		return err
	}
	return nil
}

// getIgnores updates the list of ignores from config.yaml
func getIgnores() (string, error) {
	config, err := parseConfig()
	if err != nil {
		return "", fmt.Errorf("failed to parse config: %v", err)
	}
	s := ""
	slices.SortFunc(config.Backends, func(a, b runs.Backend) int {
		return cmp.Compare(a.Remote, b.Remote)
	})
	for _, backend := range config.Backends {
		include := false

		if slices.Contains(backend.IgnoreTests, "cmd/bisync") {
			include = true
			s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
		}

		for _, ignore := range backend.Ignore {
			if strings.Contains(strings.ToLower(ignore), "bisync") {
				if !include { // don't have header row yet
					s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
				}
				include = true
				s += fmt.Sprintf(" - `%s`\n", ignore)
				// TODO: might be neat to add a "reason" param displaying the reason the test is ignored
			}
		}
	}
	return s, nil
}

// getFailures updates the list of currently failing tests from the integration tests server
func getFailures() (string, error) {
	var buf bytes.Buffer
	err := operations.CopyURLToWriter(context.Background(), integrationTestsJSONURL, &buf)
	if err != nil {
		return "", err
	}

	r := runs.Report{}
	err = json.Unmarshal(buf.Bytes(), &r)
	if err != nil {
		return "", fmt.Errorf("failed to unmarshal json: %v", err)
	}

	s := ""
	for _, run := range r.Failed {
		for i, t := range run.FailedTests {
			if strings.Contains(strings.ToLower(t), "bisync") {

				if i == 0 { // don't have header row yet
					s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(run.Remote, ":"), run.Backend)
				}

				url := integrationTestsHTMLURL + run.TrialName
				url = url[:len(url)-5] + "1.txt" // numbers higher than 1 could change from night to night
				s += fmt.Sprintf(" - [`%s`](%v)\n", t, url)

				if i == 4 && len(run.FailedTests) > 5 { // stop after 5
					s += fmt.Sprintf(" - [%v more](%v)\n", len(run.FailedTests)-5, integrationTestsHTMLURL)
					break
				}
			}
		}
	}
	s += fmt.Sprintf("- Updated: %v", r.DateTime)
	return s, nil
}

// parseConfig reads and parses the config.yaml file
func parseConfig() (*runs.Config, error) {
	d, err := os.ReadFile(configFile)
	if err != nil {
		return nil, fmt.Errorf("failed to read config file: %w", err)
	}
	config := &runs.Config{}
	err = yaml.Unmarshal(d, &config)
	if err != nil {
		return nil, fmt.Errorf("failed to parse config file: %w", err)
	}
	return config, nil
}
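`replaceBetween` assumes docs/content/bisync.md already contains both marker comments; everything between them is regenerated on each run and the markers themselves are preserved. A minimal sketch of the splice logic on a plain string, mirroring the Cut/Cut structure above (the `spliceBetween` name and the sample markers are illustrative):

package main

import (
	"fmt"
	"strings"
)

// spliceBetween replaces the text between the start and end markers with
// body, keeping both markers in place, just as replaceBetween does.
func spliceBetween(doc, start, end, body string) (string, error) {
	before, rest, ok := strings.Cut(doc, start)
	if !ok {
		return "", fmt.Errorf("could not find: %v", start)
	}
	_, after, ok := strings.Cut(rest, end)
	if !ok {
		return "", fmt.Errorf("could not find: %v", end)
	}
	return before + start + "\n" + strings.TrimSpace(body) + "\n" + end + after, nil
}

func main() {
	doc := "intro\n<!--- start --->\nold list\n<!--- end --->\noutro\n"
	out, err := spliceBetween(doc, "<!--- start --->", "<!--- end --->", "- new item")
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}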
Some files were not shown because too many files have changed in this diff.