
Compare commits


1 commit

Nick Craig-Wood
7d97ba0a0f pool: fix deadlock with --max-memory and multipart transfers
Because multipart transfers can need more than one buffer to complete,
if `--transfers` was set very high it was possible for many multipart
transfers to start, each grab fewer buffers than a whole chunk needs,
and then deadlock because no more memory was available.

This fixes the problem by introducing a reservation system which the
multipart transfer uses to ensure it can reserve all the memory for
one chunk before starting.
2025-04-22 17:25:34 +01:00
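
The fix, sketched below with illustrative types and names (not rclone's actual `lib/pool` API), makes each multipart transfer reserve a whole chunk's worth of buffers atomically before taking any of them, so a transfer can never stall half-allocated while starving the others:

```go
package pool

import "sync"

const bufferSize = 1 << 20 // illustrative 1 MiB buffer size

// Pool hands out fixed-size buffers up to a memory limit.
type Pool struct {
	mu   sync.Mutex
	cond *sync.Cond
	free int // buffers that are neither taken nor reserved
}

// New makes a Pool limited to the given number of buffers.
func New(buffers int) *Pool {
	p := &Pool{free: buffers}
	p.cond = sync.NewCond(&p.mu)
	return p
}

// Reservation tracks buffers set aside for one caller but not yet taken.
type Reservation struct {
	pool *Pool
	left int
}

// Reserve blocks until n buffers can be set aside atomically, so a
// multipart transfer either gets a whole chunk's worth or waits.
func (p *Pool) Reserve(n int) *Reservation {
	p.mu.Lock()
	defer p.mu.Unlock()
	for p.free < n {
		p.cond.Wait()
	}
	p.free -= n
	return &Reservation{pool: p, left: n}
}

// Get takes one buffer out of the reservation.
func (r *Reservation) Get() []byte {
	r.pool.mu.Lock()
	defer r.pool.mu.Unlock()
	if r.left == 0 {
		panic("pool: reservation exhausted")
	}
	r.left--
	return make([]byte, bufferSize)
}

// Release returns any unused reserved buffers and wakes waiters.
func (r *Reservation) Release() {
	r.pool.mu.Lock()
	r.pool.free += r.left
	r.left = 0
	r.pool.mu.Unlock()
	r.pool.cond.Broadcast()
}
```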
495 changed files with 48197 additions and 104138 deletions

View File

@@ -23,18 +23,15 @@ jobs:
build:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 60
defaults:
run:
shell: bash
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -45,14 +42,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -61,14 +58,14 @@ jobs:
- job_name: mac_arm64
os: macos-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -78,14 +75,14 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.24
- job_name: go1.23
os: ubuntu-latest
go: '1.24'
go: '1.23'
quicktest: true
racequicktest: true
@@ -95,17 +92,18 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
check-latest: true
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
@@ -114,6 +112,7 @@ jobs:
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
@@ -123,6 +122,7 @@ jobs:
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
@@ -151,6 +151,7 @@ jobs:
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
@@ -162,24 +163,29 @@ jobs:
env
- name: Build rclone
shell: bash
run: |
make
- name: Rclone version
shell: bash
run: |
rclone version
- name: Run tests
shell: bash
run: |
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Run librclone tests
shell: bash
run: |
make -C librclone/ctest test
make -C librclone/ctest clean
@@ -187,12 +193,14 @@ jobs:
if: matrix.librclonetest
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
make ci_beta
@@ -211,20 +219,21 @@ jobs:
steps:
- name: Get runner parameters
id: get-runner-parameters
shell: bash
run: |
echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
id: setup-go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: '>=1.24.0-rc.1'
go-version: '>=1.23.0-rc.1'
check-latest: true
cache: false
@@ -239,13 +248,13 @@ jobs:
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
with:
version: latest
skip-cache: true
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
@@ -253,7 +262,7 @@ jobs:
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
@@ -261,7 +270,7 @@ jobs:
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
@@ -269,7 +278,7 @@ jobs:
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v6
env:
GOOS: "openbsd"
with:
@@ -282,19 +291,8 @@ jobs:
- name: Scan for vulnerabilities
run: govulncheck ./...
- name: Check Markdown format
uses: DavidAnson/markdownlint-cli2-action@v20
with:
globs: |
CONTRIBUTING.md
MAINTAINERS.md
README.md
RELEASE.md
CODE_OF_CONDUCT.md
docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
run: bin/check_autogenerated_edits.py
if: github.event_name == 'pull_request'
android:
@@ -305,17 +303,18 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: '>=1.25.0-rc.1'
go-version: '>=1.24.0-rc.1'
- name: Set global environment variables
shell: bash
run: |
echo "VERSION=$(make version)" >> $GITHUB_ENV
@@ -334,6 +333,7 @@ jobs:
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -347,6 +347,7 @@ jobs:
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -359,6 +360,7 @@ jobs:
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
@@ -371,6 +373,7 @@ jobs:
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV

View File

@@ -52,7 +52,7 @@ jobs:
df -h .
- name: Checkout Repository
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -92,7 +92,7 @@ jobs:
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
result-encoding: string
script: |
@@ -198,7 +198,7 @@ jobs:
steps:
- name: Download Image Digests
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*

View File

@@ -30,7 +30,7 @@ jobs:
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin

View File

@@ -1,146 +1,144 @@
version: "2"
# golangci-lint configuration options
linters:
# Configure the linter set. To avoid unexpected results the implicit default
# set is ignored and all the ones to use are explicitly enabled.
default: none
enable:
# Default
- errcheck
- govet
- ineffassign
- staticcheck
- unused
# Additional
- gocritic
- misspell
#- prealloc # TODO
- revive
- unconvert
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
staticcheck:
# With staticcheck there is only one setting, so to extend the implicit
# default value it must be explicitly included.
checks:
# Default
- all
- -ST1000
- -ST1003
- -ST1016
- -ST1020
- -ST1021
- -ST1022
# Disable quickfix checks
- -QF*
gocritic:
# With gocritic there are different settings, but since enabled-checks
# and disabled-checks cannot both be set, for full customization the
# alternative is to disable all defaults and explicitly enable the ones
# to use.
disable-all: true
enabled-checks:
#- appendAssign # Skip default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Skip default
- caseOrder
- codegenComment
#- commentFormatting # Skip default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Skip default
- flagDeref
- flagName
#- ifElseChain # Skip default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Enable an additional check that is not enabled by default
#- singleCaseSwitch # Skip default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: ${base-path}/bin/rules.go
revive:
# With revive there is in reality only one setting, and when at least one
# rule is specified then only those rules are considered; defaults and all
# others are implicitly disabled, so every rule to be used must be
# explicitly enabled.
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
#- name: empty-block # Skip default
# disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
#- name: increment-decrement # Skip default
# disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
#- name: redefines-builtin-id # Skip default
# disabled: true
#- name: superfluous-else # Skip default
# disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
#- name: unreachable-code # Skip default
# disabled: true
#- name: unused-parameter # Skip default
# disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
formatters:
enable:
- goimports
- revive
- ineffassign
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
- gocritic
#- prealloc
#- maligned
disable-all: true
issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"

View File

@@ -1,43 +0,0 @@
default: true
# Use specific styles, to be consistent across all documents.
# Default is to accept any as long as it is consistent within the same document.
heading-style: # MD003
style: atx
ul-style: # MD004
style: dash
hr-style: # MD035
style: ---
code-block-style: # MD046
style: fenced
code-fence-style: # MD048
style: backtick
emphasis-style: # MD049
style: asterisk
strong-style: # MD050
style: asterisk
# Allow multiple headers with the same text as long as they are not siblings.
no-duplicate-heading: # MD024
siblings_only: true
# Allow long lines in code blocks and tables.
line-length: # MD013
code_blocks: false
tables: false
# The Markdown files used to generate docs with Hugo contain a top level
# header, even though the YAML front matter has a title property (which is
# used for the HTML document title only). Suppress Markdownlint warning:
# Multiple top-level headings in the same document.
single-title: # MD025
level: 1
front_matter_title:
# The HTML docs generated by Hugo from Markdown files may have slightly
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
# leading dashes so "--config string" becomes "#config-string" while it is
# "#--config-string" in GitHub preview. When writing links to headers in the
# Markdown files we must use whatever works in the final HTML generated docs.
# Suppress Markdownlint warning: Link fragments should be valid.
link-fragments: false # MD051

View File

@@ -1,80 +0,0 @@
# Rclone Code of Conduct
Like the technical community as a whole, the Rclone team and community
is made up of a mixture of professionals and volunteers from all over
the world, working on every aspect of the mission - including
mentorship, teaching, and connecting people.
Diversity is one of our huge strengths, but it can also lead to
communication issues and unhappiness. To that end, we have a few
ground rules that we ask people to adhere to. This code applies
equally to founders, mentors and those seeking help and guidance.
This isn't an exhaustive list of things that you can't do. Rather,
take it in the spirit in which it's intended - a guide to make it
easier to enrich all of us and the technical communities in which we
participate.
This code of conduct applies to all spaces managed by the Rclone
project or Rclone Services Ltd. This includes the issue tracker, the
forum, the GitHub site, the wiki, any other online services or
in-person events. In addition, violations of this code outside these
spaces may affect a person's ability to participate within them.
- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and
supports people of all backgrounds and identities. This includes,
but is not limited to members of any race, ethnicity, culture,
national origin, colour, immigration status, social and economic
class, educational level, sex, sexual orientation, gender identity
and expression, age, size, family status, political belief,
religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you
in turn will depend on the work of others. Any decision you take
will affect users and colleagues, and you should take those
consequences into account when making decisions. Remember that we're
a world-wide community, so you might not be communicating in someone
else's primary language.
- **Be respectful.** Not all of us will agree all the time, but
disagreement is no excuse for poor behavior and poor manners. We
might all experience some frustration now and then, but we cannot
allow that frustration to turn into a personal attack. It's
important to remember that a community where people feel
uncomfortable or threatened is not a productive one. Members of the
Rclone community should be respectful when dealing with other
members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of
professionals, and we conduct ourselves professionally. Be kind to
others. Do not insult or put down other participants. Harassment and
other exclusionary behavior aren't acceptable. This includes, but is
not limited to:
- Violent threats or language directed against another person.
- Discriminatory jokes and language.
- Posting sexually explicit or violent material.
- Posting (or threatening to post) other people's personally
identifying information ("doxing").
- Personal insults, especially those using racist or sexist terms.
- Unwelcome sexual attention.
- Advocating for, or encouraging, any of the above behavior.
- Repeated harassment of others. In general, if someone asks you to
stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both
social and technical, happen all the time and Rclone is no
exception. It is important that we resolve disagreements and
differing views constructively. Remember that we're different. The
strength of Rclone comes from its varied community, people from a
wide range of backgrounds. Different people have different
perspectives on issues. Being unable to understand why someone holds
a viewpoint doesn't mean that they're wrong. Don't forget that it is
human to err and blaming each other doesn't get us anywhere.
Instead, focus on helping to resolve issues and learning from
mistakes.
If you believe someone is violating the code of conduct, we ask that
you report it by emailing [info@rclone.com](mailto:info@rclone.com).
Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).
## Questions?
If you have questions, please feel free to [contact us](mailto:info@rclone.com).

View File

@@ -15,81 +15,61 @@ with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from
`rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to
obscure them
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to obscure them
## Submitting a new feature or bug fix
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues)
first so it can be discussed.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
Then [install Git](https://git-scm.com/downloads) and set your public contribution
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Next open your terminal, change directory to your preferred folder and initialise
your local rclone project:
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
```sh
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
```
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys setup in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
Note that most of the terminal commands in the rest of this guide must be
executed from the rclone folder created above.
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation:
```sh
go version
```
go version
Great, you can now compile and execute your own version of rclone:
```sh
go build
./rclone version
```
go build
./rclone version
(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature
```sh
git checkout -b my-new-feature
```
git checkout -b my-new-feature
And get hacking.
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins)
and a quick view on the rclone [code organisation](#code-organisation).
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
When ready - test the affected functionality and run the unit tests for the
code you changed
When ready - test the affected functionality and run the unit tests for the code you changed
```sh
cd folder/with/changed/files
go test -v
```
cd folder/with/changed/files
go test -v
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
This is typically enough if you made a simple bug fix, otherwise please read
the rclone [testing](#testing) section too.
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
Make sure you
@@ -99,19 +79,14 @@ Make sure you
When you are done with that push your changes to GitHub:
```sh
git push -u origin my-new-feature
```
git push -u origin my-new-feature
and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your changes will then get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, commit and push your updates to
GitHub.
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)
or [squash your commits](#squashing-your-commits).
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and GitHub
@@ -119,118 +94,87 @@ or [squash your commits](#squashing-your-commits).
Follow the guideline for [commit messages](#commit-messages) and then:
```sh
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
```
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
You can modify the message or changes in the latest commit using:
```sh
git commit --amend
```
git commit --amend
If you amend to commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits
Note that you are about to rewrite the GitHub history of your branch. It is good
practice to involve your collaborators before modifying commits that have been
pushed to GitHub.
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
Your previously pushed commits are replaced by:
```sh
git push --force origin my-new-feature
```
git push --force origin my-new-feature
### Basing your changes on the latest master
To base your changes on the latest version of the
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
```sh
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
```
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
If you rebase commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits
### Squashing your commits ###
To combine your commits into one commit:
```sh
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
```
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
If everything is fine, then make the new combined commit:
```sh
git commit # To commit the undone commits as one
```
git commit # To commit the undone commits as one
otherwise, you may roll back using:
```sh
git reflog # To check that HEAD@{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```
git reflog # To check that HEAD@{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
If you squash commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a
more complex situation.
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
### GitHub Continuous Integration
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions)
to build and test the project, which should be automatically available for your
fork too from the `Actions` tab in your repository.
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing
### Code quality tests
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
you can run the same tests as get run in the CI which can be very helpful.
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests as get run in the CI which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that the rclone codebase all uses the same coding
standards. These tests also check for easy mistakes to make (like forgetting
to check an error return).
Using these tests ensures that the rclone codebase all uses the same coding standards. These tests also check for easy mistakes to make (like forgetting to check an error return).
### Quick testing
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
```sh
go test -v ./...
```
go test -v ./...
You can also use `make`, if supported by your platform
```sh
make quicktest
```
make quicktest
The quicktest is [automatically run by GitHub](#github-continuous-integration)
when you push your branch to GitHub.
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing
@@ -246,51 +190,41 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.
```sh
cd backend/drive
go test -v
```
cd backend/drive
go test -v
You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.
```sh
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
cd fs/operations
go test -v -remote TestDrive:
```
cd fs/operations
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:
```sh
go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive
```
go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive
### Full integration testing
If you want to run all the integration tests against all the remotes,
then change into the project root and run
```sh
make check
make test
```
make check
make test
The commands may require some extra go packages which you can install with
```sh
make build_dep
```
make build_dep
The full integration tests are run daily on the integration test server. You can
find the results at <https://pub.rclone.org/integration-tests/>
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation
@@ -298,48 +232,46 @@ Rclone code is organised into a small number of top level directories
with modules beneath.
- backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers
- ...providers
- all - import this to load all the cloud providers
- ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
- all - import this to load all the commands
- ...commands
- all - import this to load all the commands
- ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
- content - adjust these docs only, except files or portions marked
autogenerated, for which the corresponding .go file must be edited
instead; everything else is autogenerated
- commands - these are auto-generated, edit the corresponding .go file
- content - adjust these docs only - everything else is autogenerated
- command - these are auto-generated - edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- fstest - provides integration test framework
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
@@ -347,36 +279,6 @@ with modules beneath.
If you are adding a new feature then please update the documentation.
The documentation sources are generally in Markdown format, in conformance
with the CommonMark specification and compatible with GitHub Flavored
Markdown (GFM). The markdown format is checked as part of the lint operation
that runs automatically on pull requests, to enforce standards and consistency.
This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
tool, which can also be integrated into editors so you can perform the same
checks while writing.
HTML pages, served as the website <rclone.org>, are generated from the Markdown
using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
Hugo currently uses a different algorithm for header anchors than what
GitHub uses for its Markdown rendering. For example, in the HTML docs
generated by Hugo any leading `-` characters are ignored, which means when
linking to a header with text `--config string` we therefore need to use the
link `#config-string` in our Markdown source, which will not work in GitHub's
preview where `#--config-string` would be the correct link.
Most of the documentation is written directly in text files with extension
`.md`, mainly within the folder `docs/content`. Note that several of these files
are autogenerated (e.g. the command documentation, and `docs/content/flags.md`),
or contain autogenerated portions (e.g. the backend documentation under
`docs/content/commands`). These are marked with an `autogenerated` comment.
The sources of the autogenerated text are usually Markdown formatted text
embedded as string values in the Go source code, so you need to locate these
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
files in the root of the repository are also autogenerated. The autogeneration
of files, and the website, will be done during the release process. See the
`make doc` and `make website` targets in the Makefile if you are interested in
how. You don't need to run these when adding a feature.
If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.
@@ -385,40 +287,39 @@ If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field.
- Start with the most important information about the option,
as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored
and two line breaks create a new paragraph.
- This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
- Like with docs generated from Markdown, a single line break is ignored
and two line breaks create a new paragraph.
- This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
- Each example value has its own `Help:` field, but these are treated
a bit differently than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like names of
countries, it looks better without an ending period/full stop character.
- Each example value has its own `Help:` field, but these are treated
a bit differently than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like names of
countries, it looks better without an ending period/full stop character.
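
A minimal sketch of an option following the guidelines above (hypothetical backend and values):

```go
package example

import "github.com/rclone/rclone/fs"

// regionOption is illustrative: a one-sentence first line ending in a
// full stop, extra detail after a blank line, and enumeration examples
// without trailing full stops.
var regionOption = fs.Option{
	Name: "region",
	Help: `Region to connect to.

More detail can go in a second paragraph like this one, which is shown
in "rclone config" and in the generated docs but not in the flag help.`,
	Examples: []fs.OptionExample{{
		Value: "eu-west-1",
		Help:  "EU (Ireland)",
	}, {
		Value: "us-east-1",
		Help:  "US East (N. Virginia)",
	}},
}
```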
When writing documentation for an entirely new backend,
see [backend documentation](#backend-documentation).
The only documentation you need to edit are the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
If you are updating documentation for a command, you must do that in the
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
sentence on a single line, without a period/full stop character at the end,
as it will be combined unmodified with other information (such as any default
value).
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).
Note that you can use
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy. Just remember the
caveat when linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
## Making a release
@@ -449,13 +350,13 @@ change will get linked into the issue.
Here is an example of a short commit message:
```text
```
drive: add team drive support - fixes #885
```
And here is an example of a longer one:
```text
```
mount: fix hang on errored upload
In certain circumstances, if an upload failed then the mount could hang
@@ -478,9 +379,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
```sh
go get github.com/ncw/new_dependency
```
go get github.com/ncw/new_dependency
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
@@ -492,9 +391,7 @@ and `go.sum` in the same commit as your other changes.
If you need to update a dependency then run
```sh
go get golang.org/x/crypto
```
go get golang.org/x/crypto
Check in a single commit as above.
@@ -537,38 +434,25 @@ remote or an fs.
### Getting going
- Create `backend/remote/remote.go` (copy this from a similar remote)
- box is a good one to start from if you have a directory-based remote (and
shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's
[lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote
more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
make sure we can encode any path name and `rclone info` to help determine the
encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
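
For orientation, a bare-bones sketch of what the initial `backend/remote/remote.go` registration looks like (hypothetical backend; a real one copied from box or b2 contains far more):

```go
package remote

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "remote",
		Description: "Illustrative remote storage",
		NewFs:       NewFs,
		Options:     []fs.Option{},
	})
}

// NewFs constructs an Fs from the path, e.g. remote:path/to/dir.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse options from m, build the HTTP client, etc.
	return nil, fs.ErrorNotImplemented
}
```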
### Guidelines for a speedy merge
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
if your backend is HTTP based - this adds features like `--dump bodies`,
`--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function
names, layout, structure. **Don't** move stuff around and **Don't** delete the
comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
backends like that - don't follow them!)
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as
possible to each other is a high priority!
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
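
As a sketch of the lib/rest plus fs/fshttp pattern recommended above (hypothetical endpoint and response type, not a real provider API):

```go
package remote

import (
	"context"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// item is an illustrative JSON response type; in a real backend it
// would live in api/types.go.
type item struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

// newSrv wires rclone's HTTP client (which provides --dump bodies,
// --tpslimit and --user-agent for free) into a rest.Client.
func newSrv(ctx context.Context) *rest.Client {
	return rest.NewClient(fshttp.NewClient(ctx)).SetRoot("https://api.example.com/v1")
}

// getItem fetches one item, decoding the JSON response into result.
func getItem(ctx context.Context, srv *rest.Client, id string) (*item, error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/items/" + id,
	}
	var result item
	_, err := srv.CallJSON(ctx, &opts, nil, &result)
	if err != nil {
		return nil, err
	}
	return &result, nil
}
```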
### Unit tests
@@ -579,20 +463,19 @@ remote or an fs.
### Integration tests
- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from
the project root:
- go install ./...
- test_all -backends remote
- Once you've done that then you can use the integration test framework from the project root:
- go install ./...
- test_all -backends remote
Or if you want to run the integration tests manually:
- Make sure integration tests pass with
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
- `go test -v -remote TestRemote: -fast-list`
- `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
@@ -604,13 +487,10 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.
- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are
automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your
reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
table and the Optional Features table.
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
@@ -626,22 +506,21 @@ It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Make sure this is in alphabetical order in the `Providers` section.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page on GitHub
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
@@ -662,51 +541,34 @@ For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone
## Writing a plugin
New features (backends, commands) can also be added "out-of-tree", through Go
plugins. Changes will be kept in a dynamically loaded file instead of being
compiled into the main binary. This is useful if you can't merge your changes
upstream or don't want to maintain a fork of rclone.
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
### Usage
- Naming
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source
of rclone)
- Naming
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
### Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
Check `rclone --version` and make sure that the plugin's rclone dependency and
host Go version match.
Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
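
A minimal sketch of what such a plugin's top-level package can look like (hypothetical module path; the blank import runs the backend package's `init()`, which registers it with rclone):

```go
// Hypothetical source for librcloneplugin_backend_pifs.so, built with
// go build -buildmode=plugin as described above.
package main

import (
	_ "github.com/YOURUSER/rclone-backend-pifs/pifs" // hypothetical module path
)

// main is never called for a plugin but package main requires it.
func main() {}
```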
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.
This may be easier than using a plugin and is supported on all
platforms, not just macOS and Linux.
This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)

View File

@@ -1,4 +1,4 @@
# Maintainers guide for rclone
# Maintainers guide for rclone #
Current active maintainers of rclone are:
@@ -24,108 +24,80 @@ Current active maintainers of rclone are:
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |
## This is a work in progress draft
**This is a work in progress Draft**
This is a guide for how to be an rclone maintainer. This is mostly a write-up
of what I (@ncw) attempt to do.
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
## Triaging Tickets
## Triaging Tickets ##
When a ticket comes in it should be triaged. This means it should be classified
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
of back and forth to determine whether it is a valid ticket so tickets may
remain without labels or milestone for a while.
When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
Rclone uses labels like this:
- `bug` - a definitely verified bug
- `can't reproduce` - a problem which we can't reproduce
- `doc fix` - a bug in the documentation - if users need help understanding the
docs add this label
- `duplicate` - normally close these and ask the user to subscribe to the original
- `enhancement: new remote` - a new rclone backend
- `enhancement` - a new feature
- `FUSE` - to do with `rclone mount` command
- `good first issue` - mark these if you find a small self-contained issue -
these get shown to new visitors to the project
- `help wanted` - mark these if you find a self-contained issue - these get
shown to new visitors to the project
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
- `maintenance` - internal enhancement, code re-organisation, etc.
- `Needs Go 1.XX` - waiting for that version of Go to be released
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
- `Remote: XXX` - which rclone backend this affects
- `thinking` - not decided on the course of action yet
* `bug` - a definitely verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with `rclone mount` command
* `good first issue` - mark these if you find a small self-contained issue - these get shown to new visitors to the project
* `help wanted` - mark these if you find a self-contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation, etc.
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
* `thinking` - not decided on the course of action yet
If it turns out to be a bug or an enhancement it should be tagged as such, with
the appropriate other tags. Don't forget the "good first issue" tag to give new
contributors something easy to do to get going.
If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next
release, the one after, Soon or Help Wanted. Bugs can be added to the
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
something (e.g. the next go release).
When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (e.g. the next go release).
The milestones have these meanings:
- v1.XX - stuff we would like to fit into this release
- v1.XX+1 - stuff we are leaving until the next release
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
- Help wanted - blue sky stuff that might get moved up, or someone could help with
- Known bugs - bugs waiting on external factors or we aren't going to fix for
the moment
* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled for a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
are good candidates for ones that have slipped between the gaps and need
following up.
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
## Closing Tickets
## Closing Tickets ##
Close tickets as soon as you can - make sure they are tagged with a release.
Post a link to a beta in the ticket with the fix in, asking for feedback.
Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta in the ticket with the fix in, asking for feedback.
## Pull requests
## Pull requests ##
Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well nowadays so you can
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.
Merging pull requests on GitHub itself works quite well nowadays so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run
`bin/update-authors.py` to update the authors file then `git push`.
After merging the commit, in your local master branch, do `git pull` then run `bin/update-authors.py` to update the authors file then `git push`.
Sometimes pull requests need to be left open for a while - this is especially
true of contributions of new backends which take a long time to get right.
Sometimes pull requests need to be left open for a while - this is especially true of contributions of new backends which take a long time to get right.
## Merges
## Merges ##
If you are merging a branch locally then do `git merge --ff-only branch-name` to
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
## Release cycle
## Release cycle ##
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
if there is something big to merge that didn't stabilize properly or for personal
reasons.
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.
High impact regressions should be fixed before the next release.
Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface.
Near the start of the release cycle, the dependencies should be updated with `make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big to let
things settle down.
Towards the end of the release cycle try not to merge anything too big to let things settle down.
Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.
Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time-consuming, often needing several rounds of test and fix depending on exactly how many new features rclone has gained.
## Mailing list
## Mailing list ##
There is now an invite-only mailing list for rclone developers `rclone-dev` on
Google Groups.
There is now an invite-only mailing list for rclone developers `rclone-dev` on Google Groups.
## TODO
## TODO ##
I should probably make a <dev@rclone.org> to register with cloud providers.
I should probably make a dev@rclone.org to register with cloud providers.

MANUAL.html generated (49053 changes): file diff suppressed because it is too large
MANUAL.md generated (22912 changes): file diff suppressed because it is too large
MANUAL.txt generated (9035 changes): file diff suppressed because it is too large
Makefile
@@ -100,7 +100,6 @@ compiletest:
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@golangci-lint run $(LINTTAGS) ./...
@bin/markdown-lint
@echo "-- END CODE QUALITY REPORT ---------------------------------"
# Get the build dependencies
@@ -145,11 +144,9 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
go generate ./lib/transform
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
go run bin/make_bisync_docs.go ./docs/content/
backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
@@ -246,7 +243,7 @@ fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache
cd docs && hugo server --logLevel info -w --disableFastRender
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new

README.md (274 changes)
@@ -1,6 +1,22 @@
<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
<div align="center">
<sup>Special thanks to our sponsor:</sup>
<br>
<br>
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
<div>
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
</div>
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
<div>
<sup>Visit warp.dev to learn more.</sup>
</div>
</a>
<br>
<hr>
</div>
<br>
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[Website](https://rclone.org) |
@@ -18,107 +34,97 @@
# Rclone
Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.
Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
## Storage providers
- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service (OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- put.io [:page_facing_up:](https://rclone.org/putio/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
* Box [:page_facing_up:](https://rclone.org/box/)
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
* Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
* Google Drive [:page_facing_up:](https://rclone.org/drive/)
* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
* HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service (OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linkbox [:page_facing_up:](https://rclone.org/linkbox)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
* Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
* Minio [:page_facing_up:](https://rclone.org/s3/#minio)
* Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
* OVH [:page_facing_up:](https://rclone.org/swift/)
* Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
* Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
* The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
@@ -126,54 +132,50 @@ Please see [the full list of all storage providers and their features](https://r
These backends adapt or modify other storage providers
- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
* Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
* Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
* Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
* Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
* Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
* Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
* Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
* Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
## Features
- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
files
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
over HTTP/WebDAV/FTP/SFTP/DLNA
* MD5/SHA-1 hashes checked at all times for file integrity
* Timestamps preserved on files
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDAV/FTP/SFTP/DLNA
## Installation & documentation
Please see the [rclone website](https://rclone.org/) for:
- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more
* [Installation](https://rclone.org/install/)
* [Documentation & configuration](https://rclone.org/docs/)
* [Changelog](https://rclone.org/changelog/)
* [FAQ](https://rclone.org/faq/)
* [Storage providers](https://rclone.org/overview/)
* [Forum](https://forum.rclone.org/)
* ...and more
## Downloads
- <https://rclone.org/downloads/>
* https://rclone.org/downloads/
## License
License
-------
This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).

RELEASE.md
@@ -4,55 +4,52 @@ This file describes how to make the various kinds of releases
## Extra required software for making a release
- [gh, the GitHub CLI](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages
* [gh, the GitHub CLI](https://github.com/cli/cli) for uploading packages
* pandoc for making the html and man pages
## Making a release
- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post
* git checkout master # see below for stable branch
* git pull # IMPORTANT
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
* make tidy
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push origin # without --follow-tags so it doesn't push the tag if it fails
* git push --follow-tags origin
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
## Update dependencies
Early in the next release cycle update the dependencies.
- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`
* Review any pinned packages in go.mod and remove if possible
* `make updatedirect`
* `make GOTAGS=cmount`
* `make compiletest`
* Fix anything which doesn't compile at this point and commit changes here
* `git commit -a -v -m "build: update all dependencies"`
If `make updatedirect` upgrades the version of go in the `go.mod`
```text
go 1.22.0
```
go 1.22.0
then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.
@@ -60,7 +57,7 @@ If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.
```sh
```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
@@ -70,7 +67,7 @@ If the `go mod tidy` fails, use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when
done
```sh
```
git co go.mod go.sum
```
@@ -80,12 +77,12 @@ Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time-consuming to fix.
- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`
* `make update`
* `make GOTAGS=cmount`
* `make compiletest`
* roll back any updates which didn't compile
* `git commit -a -v --amend`
* **NB** watch out for this changing the default go version in `go.mod`
Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
@@ -102,9 +99,7 @@ The above procedure will not upgrade major versions, e.g. v2 to v3.
However this tool can show which major versions might need to be
upgraded:
```sh
go run github.com/icholy/gomajor@latest list -major
```
go run github.com/icholy/gomajor@latest list -major
Expect API breakage when updating major versions.
@@ -112,9 +107,7 @@ Expect API breakage when updating major versions.
At some point after the release run
```sh
bin/tidy-beta v1.55
```
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
@@ -124,64 +117,54 @@ If rclone needs a point release due to some horrendous bug:
Set vars
- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
* BASE_TAG=v1.XX # e.g. v1.52
* NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable
* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* make startstable
Now
- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it
is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push
* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* make startstable
* Do the steps as above
* git co master
* `#` cherry pick the changes to the changelog - check the diff to make sure it is correct
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Sponsor logos
If updating the website note that the sponsor logos have been moved out of the
main repository.
If updating the website note that the sponsor logos have been moved out of the main repository.
You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
You will need to checkout `/docs/static/img/logos` from https://github.com/rclone/third-party-logos
which is a private repo containing artwork from sponsors.
## Update the website between releases
Create an update website branch based off the last release
```sh
git co -b update-website
```
git co -b update-website
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
```sh
git reset --hard v1.64.0
```
git reset --hard v1.64.0
Create the changes, check them in, test with `make serve` then
```sh
make upload_test_website
```
make upload_test_website
Check out <https://test.rclone.org> and when happy
Check out https://test.rclone.org and when happy
```sh
make upload_website
```
make upload_website
Cherry pick any changes back to master and the stable branch if it is active.
@@ -189,14 +172,14 @@ Cherry pick any changes back to master and the stable branch if it is active.
To do a basic build of rclone's docker image to debug builds locally:
```sh
```
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```
To test the multiplatform build
```sh
```
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```
@@ -204,6 +187,6 @@ To make a full build then set the tags correctly and add `--push`
Note that you can't only build one architecture - you need to build them all.
```sh
```
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

VERSION
@@ -1 +1 @@
v1.72.0
v1.70.0

backend/all/all.go
@@ -14,12 +14,10 @@ import (
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/doi"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/filefabric"
_ "github.com/rclone/rclone/backend/filelu"
_ "github.com/rclone/rclone/backend/filescom"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/gofile"

backend/azureblob/azureblob.go
@@ -51,7 +51,6 @@ import (
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/sync/errgroup"
)
@@ -73,7 +72,6 @@ const (
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
sasCopyValidity = time.Hour // how long SAS should last when doing server side copy
)
var (
@@ -561,11 +559,6 @@ type Fs struct {
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
publicAccess container.PublicAccessType // Container Public Access Level
// user delegation cache
userDelegationMu sync.Mutex
userDelegation *service.UserDelegationCredential
userDelegationExpiry time.Time
}
// Object describes an azure object
@@ -619,9 +612,6 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
if f.opt.DirectoryMarkers && strings.HasSuffix(containerPath, "//") {
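// A directory marker keeps its trailing /, so the join above can
// produce a double slash - drop one so the marker path resolves.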
containerPath = containerPath[:len(containerPath)-1]
}
return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}
@@ -938,7 +928,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
// User with username and password
//nolint:staticcheck // this is deprecated due to Azure policy
options := azidentity.UsernamePasswordCredentialOptions{
ClientOptions: policyClientOptions,
}
@@ -991,38 +980,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
// Workload Identity based authentication
var options azidentity.ManagedIdentityCredentialOptions
options.ID = azidentity.ClientID(opt.MSIClientID)
msiCred, err := azidentity.NewManagedIdentityCredential(&options)
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
getClientAssertions := func(context.Context) (string, error) {
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{"api://AzureADTokenExchange"},
})
if err != nil {
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
}
return token.Token, nil
}
assertOpts := &azidentity.ClientAssertionCredentialOptions{}
f.cred, err = azidentity.NewClientAssertionCredential(
opt.Tenant,
opt.ClientID,
getClientAssertions,
assertOpts)
if err != nil {
return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
}
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
f.cred, err = azidentity.NewAzureCLICredential(&options)
@@ -1256,7 +1213,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
continue
}
// process directory markers as directories
remote, _ = strings.CutSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
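// strings.TrimRight strips every trailing slash, while CutSuffix
// above removes at most one; the two differ for names made up
// entirely of slashes.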
}
remote = remote[len(prefix):]
if addContainer {
@@ -1338,9 +1295,9 @@ func (f *Fs) containerOK(container string) bool {
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if !f.containerOK(containerName) {
return fs.ErrorDirNotFound
return nil, fs.ErrorDirNotFound
}
err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1348,16 +1305,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(containerName)
return nil
return entries, nil
}
// listContainers returns all the containers to out
@@ -1393,47 +1350,14 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return fs.ErrorListBucketRequired
return nil, fs.ErrorListBucketRequired
}
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
if err != nil {
return err
}
return f.listContainers(ctx)
}
return list.Flush()
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -1610,7 +1534,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote, _ = strings.CutSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
@@ -1760,38 +1684,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.deleteContainer(ctx, container)
}
// Get a user delegation which is valid for at least sasCopyValidity
//
// This value is cached in f
func (f *Fs) getUserDelegation(ctx context.Context) (*service.UserDelegationCredential, error) {
f.userDelegationMu.Lock()
defer f.userDelegationMu.Unlock()
if f.userDelegation != nil && time.Until(f.userDelegationExpiry) > sasCopyValidity {
return f.userDelegation, nil
}
// Validity window
start := time.Now().UTC()
expiry := start.Add(2 * sasCopyValidity)
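// Asking for twice sasCopyValidity means a cached credential still
// has at least sasCopyValidity left when it is reused from the cache.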
startStr := start.Format(time.RFC3339)
expiryStr := expiry.Format(time.RFC3339)
// Acquire user delegation key from the service client
info := service.KeyInfo{
Start: &startStr,
Expiry: &expiryStr,
}
userDelegationKey, err := f.svc.GetUserDelegationCredential(ctx, info, nil)
if err != nil {
return nil, fmt.Errorf("failed to get user delegation key: %w", err)
}
f.userDelegation = userDelegationKey
f.userDelegationExpiry = expiry
return f.userDelegation, nil
}
// getAuth gets auth to copy o.
//
// tokenOK is used to signal that token based auth (Microsoft Entra
@@ -1803,7 +1695,7 @@ func (f *Fs) getUserDelegation(ctx context.Context) (*service.UserDelegationCred
// URL (not a SAS) and token will be empty.
//
// If tokenOK is true it may also return a token for the auth.
func (o *Object) getAuth(ctx context.Context, noAuth bool) (srcURL string, err error) {
func (o *Object) getAuth(ctx context.Context, tokenOK bool, noAuth bool) (srcURL string, token *string, err error) {
f := o.fs
srcBlobSVC := o.getBlobSVC()
srcURL = srcBlobSVC.URL()
@@ -1812,47 +1704,29 @@ func (o *Object) getAuth(ctx context.Context, noAuth bool) (srcURL string, err e
case noAuth:
// If same storage account then no auth needed
case f.cred != nil:
// Generate a User Delegation SAS URL using Azure AD credentials
userDelegationKey, err := f.getUserDelegation(ctx)
if !tokenOK {
return srcURL, token, errors.New("not supported: Microsoft Entra ID")
}
options := policy.TokenRequestOptions{}
accessToken, err := f.cred.GetToken(ctx, options)
if err != nil {
return "", fmt.Errorf("sas creation: %w", err)
return srcURL, token, fmt.Errorf("failed to create access token: %w", err)
}
// Build the SAS values
perms := sas.BlobPermissions{Read: true}
container, containerPath := o.split()
start := time.Now().UTC()
expiry := start.Add(sasCopyValidity)
vals := sas.BlobSignatureValues{
StartTime: start,
ExpiryTime: expiry,
Permissions: perms.String(),
ContainerName: container,
BlobName: containerPath,
}
// Sign with the delegation key
queryParameters, err := vals.SignWithUserDelegation(userDelegationKey)
if err != nil {
return "", fmt.Errorf("signing SAS with user delegation failed: %w", err)
}
// Append the SAS to the URL
srcURL = srcBlobSVC.URL() + "?" + queryParameters.Encode()
token = &accessToken.Token
case f.sharedKeyCred != nil:
// Generate a short lived SAS URL if using shared key credentials
expiry := time.Now().Add(sasCopyValidity)
expiry := time.Now().Add(time.Hour)
sasOptions := blob.GetSASURLOptions{}
srcURL, err = srcBlobSVC.GetSASURL(sas.BlobPermissions{Read: true}, expiry, &sasOptions)
if err != nil {
return srcURL, fmt.Errorf("failed to create SAS URL: %w", err)
return srcURL, token, fmt.Errorf("failed to create SAS URL: %w", err)
}
case f.anonymous || f.opt.SASURL != "":
// If using a SASURL or anonymous, no need for any extra auth
default:
return srcURL, errors.New("unknown authentication type")
return srcURL, token, errors.New("unknown authentication type")
}
return srcURL, nil
return srcURL, token, nil
}
// Do multipart parallel copy.
@@ -1873,7 +1747,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
o.fs = f
o.remote = remote
srcURL, err := src.getAuth(ctx, false)
srcURL, token, err := src.getAuth(ctx, true, false)
if err != nil {
return nil, fmt.Errorf("multipart copy: %w", err)
}
@@ -1894,7 +1768,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
var (
srcSize = src.size
partSize = int64(chunksize.Calculator(o, src.size, blockblob.MaxBlocks, f.opt.ChunkSize))
numParts = (srcSize + partSize - 1) / partSize
numParts = (srcSize-1)/partSize + 1
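// Both expressions are integer ceiling division: for srcSize > 0 they
// equal ceil(srcSize/partSize), e.g. 10 bytes in 4 byte parts -> 3.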
blockIDs = make([]string, numParts) // list of blocks for finalize
g, gCtx = errgroup.WithContext(ctx)
checker = newCheckForInvalidBlockOrBlob("copy", o)
@@ -1917,8 +1791,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
Count: partSize,
},
// Specifies the authorization scheme and signature for the copy source.
// We use SAS URLs as this doesn't seem to work always
// CopySourceAuthorization: token,
CopySourceAuthorization: token,
// CPKInfo *blob.CPKInfo
// CPKScopeInfo *blob.CPKScopeInfo
}
@@ -1988,7 +1861,7 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
dstBlobSVC := f.getBlobSVC(dstContainer, dstPath)
// Get the source auth - none needed for same storage account
srcURL, err := src.getAuth(ctx, f == src.fs)
srcURL, _, err := src.getAuth(ctx, false, f == src.fs)
if err != nil {
return nil, fmt.Errorf("single part copy: source auth: %w", err)
}
@@ -2152,6 +2025,7 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
}
metadata = make(map[string]*string, len(o.meta))
for k, v := range o.meta {
v := v
metadata[k] = &v
}
return metadata
@@ -2302,6 +2176,11 @@ func (o *Object) getTags() (tags map[string]string) {
// getBlobSVC creates a blob client
func (o *Object) getBlobSVC() *blob.Client {
container, directory := o.split()
// If we are trying to remove an all-slashes directory marker then
// this will have one / too many now.
if bucket.IsAllSlashes(o.remote) {
directory = strings.TrimSuffix(directory, "/")
}
return o.fs.getBlobSVC(container, directory)
}
@@ -2703,13 +2582,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
// Upload the block, with MD5 for check
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
@@ -2797,8 +2669,6 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
blockList blockblob.GetBlockListResponse
properties *blob.GetPropertiesResponse
options *blockblob.CommitBlockListOptions
// Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
)
properties, err = o.readMetaDataAlways(ctx)
@@ -2810,7 +2680,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
if objectExists {
// Get the committed block list
err = pacer.Call(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
return o.fs.shouldRetry(ctx, err)
})
@@ -2852,7 +2722,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
// Commit only the committed blocks
fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
err = pacer.Call(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
return o.fs.shouldRetry(ctx, err)
})
@@ -2993,9 +2863,6 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
return ui, err
}
}
// if ui.isDirMarker && strings.HasSuffix(containerPath, "//") {
// containerPath = containerPath[:len(containerPath)-1]
// }
// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))
@@ -3188,7 +3055,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}

backend/azurefiles/azurefiles.go
@@ -453,7 +453,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
options := azidentity.AzureCLICredentialOptions{}
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
@@ -516,7 +516,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
// User with username and password
//nolint:staticcheck // this is deprecated due to Azure policy
options := azidentity.UsernamePasswordCredentialOptions{
ClientOptions: policyClientOptions,
}
@@ -550,7 +549,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
case opt.UseMSI:
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
b2i := map[bool]int{false: 0, true: 1}
var b2i = map[bool]int{false: 0, true: 1}
set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
if set > 1 {
return nil, errors.New("more than one user-assigned identity ID is set")
@@ -569,37 +568,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
case opt.ClientID != "" && opt.Tenant != "" && opt.MSIClientID != "":
// Workload Identity based authentication
var options azidentity.ManagedIdentityCredentialOptions
options.ID = azidentity.ClientID(opt.MSIClientID)
msiCred, err := azidentity.NewManagedIdentityCredential(&options)
if err != nil {
return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
}
getClientAssertions := func(context.Context) (string, error) {
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{"api://AzureADTokenExchange"},
})
if err != nil {
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
}
return token.Token, nil
}
assertOpts := &azidentity.ClientAssertionCredentialOptions{}
cred, err = azidentity.NewClientAssertionCredential(
opt.Tenant,
opt.ClientID,
getClientAssertions,
assertOpts)
if err != nil {
return nil, fmt.Errorf("failed to acquire client assertion token: %w", err)
}
default:
return nil, errors.New("no authentication method configured")
}
@@ -854,7 +822,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
return entries, err
}
opt := &directory.ListFilesAndDirectoriesOptions{
var opt = &directory.ListFilesAndDirectoriesOptions{
Include: directory.ListFilesInclude{
Timestamps: true,
},
@@ -953,7 +921,7 @@ func (o *Object) setMetadata(resp *file.GetPropertiesResponse) {
}
}
// getMetadata gets the metadata if it hasn't already been fetched
// readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) getMetadata(ctx context.Context) error {
resp, err := o.fileClient().GetProperties(ctx, nil)
if err != nil {
@@ -1013,10 +981,6 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
SMBProperties: &file.SMBProperties{
LastWriteTime: &t,
},
HTTPHeaders: &file.HTTPHeaders{
ContentMD5: o.md5,
ContentType: &o.contentType,
},
}
_, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
if err != nil {

backend/b2/b2.go
@@ -847,7 +847,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
last := ""
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
@@ -855,16 +855,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return nil
return entries, nil
}
// listBuckets returns all the buckets to out
@@ -890,46 +890,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -1705,21 +1673,6 @@ func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
return o.getMetaDataListing(ctx)
}
}
// If using versionAt we need to list the find the correct version.
if o.fs.opt.VersionAt.IsSet() {
info, err := o.getMetaDataListing(ctx)
if err != nil {
return nil, err
}
if info.Action == "hide" {
// Return an object not found error if the current version is deleted.
return nil, fs.ErrorObjectNotFound
}
return info, nil
}
_, info, err = o.getOrHead(ctx, "HEAD", nil)
return info, err
}
@@ -1930,14 +1883,9 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
// --b2-download-url cloudflare strips the Content-Length
// headers (presumably so it can inject stuff) so use the old
// length read from the listing.
// Additionally, the official examples return S3 headers
// instead of native ones, i.e. no file ID, so use the ID from the listing.
if info.Size < 0 {
info.Size = o.size
}
if info.ID == "" {
info.ID = o.id
}
return resp, info, nil
}
@@ -2460,7 +2408,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}
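An aside for orientation, not part of the diff: this file's hunks toggle a paged listing implementation (ListP). A minimal sketch, under stated assumptions, of how a caller consumes that interface; countObjects is a hypothetical helper, while fs.ListPer, fs.ListRCallback and fs.DirEntries are the interfaces named in the diff itself.
package main
import (
	"context"

	"github.com/rclone/rclone/fs"
)
// countObjects tallies file objects under dir, receiving entries in
// tranches via the callback rather than as one big slice.
func countObjects(ctx context.Context, f fs.ListPer, dir string) (n int, err error) {
	err = f.ListP(ctx, dir, func(entries fs.DirEntries) error {
		for _, entry := range entries {
			if _, ok := entry.(fs.Object); ok {
				n++ // directories arrive as fs.Directory and are skipped
			}
		}
		return nil
	})
	return n, err
}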

View File

@@ -446,14 +446,14 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
t.Run("List", func(t *testing.T) {
fstest.CheckListing(t, f, test.want)
})
t.Run("NewObject", func(t *testing.T) {
gotObj, gotErr := f.NewObject(ctx, fileName)
assert.Equal(t, test.wantErr, gotErr)
if gotErr == nil {
assert.Equal(t, test.wantSize, gotObj.Size())
}
})
// b2 NewObject doesn't work with VersionAt
//t.Run("NewObject", func(t *testing.T) {
// gotObj, gotErr := f.NewObject(ctx, fileName)
// assert.Equal(t, test.wantErr, gotErr)
// if gotErr == nil {
// assert.Equal(t, test.wantSize, gotObj.Size())
// }
//})
})
}
})

View File

@@ -125,21 +125,10 @@ type FolderItems struct {
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
// There is some confusion about how this is actually
// returned. The []struct has worked for many years, but in
// https://github.com/rclone/rclone/issues/8776 box returned
// it not as a list. We don't actually use
// this so comment it out.
//
// Order struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
//
// Order []struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
}
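An editorial aside, not part of the diff: if the Order field were ever reinstated, one tolerant approach to the two shapes described in the comment above is to defer decoding with json.RawMessage and branch on the first byte. The order type and decodeOrder below are hypothetical sketches, not Box API code.
package api
import "encoding/json"
type order struct {
	By        string `json:"by"`
	Direction string `json:"direction"`
}
// decodeOrder accepts either a single object or a list of them,
// branching on the leading byte of the raw JSON (leading whitespace
// is ignored here for brevity).
func decodeOrder(raw json.RawMessage) ([]order, error) {
	if len(raw) == 0 || string(raw) == "null" {
		return nil, nil
	}
	if raw[0] == '[' {
		var list []order
		err := json.Unmarshal(raw, &list)
		return list, err
	}
	var one order
	if err := json.Unmarshal(raw, &one); err != nil {
		return nil, err
	}
	return []order{one}, nil
}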
// Parent defined the ID of the parent directory
@@ -282,9 +271,9 @@ type User struct {
ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"`
Timezone string `json:"timezone"`
SpaceAmount float64 `json:"space_amount"`
SpaceUsed float64 `json:"space_used"`
MaxUploadSize float64 `json:"max_upload_size"`
SpaceAmount int64 `json:"space_amount"`
SpaceUsed int64 `json:"space_used"`
MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"`
JobTitle string `json:"job_title"`
Phone string `json:"phone"`

View File

@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
for part := range strings.SplitSeq(ranges, ",") {
for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
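For orientation: the two loops in this hunk are equivalent. strings.SplitSeq (new in Go 1.24) yields each part lazily through an iterator, while strings.Split allocates the whole slice up front. A minimal self-contained comparison, with an illustrative ranges string:
package main
import (
	"fmt"
	"strings"
)
func main() {
	const ranges = "0:99,100:199,200:"
	// Lazy: no intermediate slice is built (Go 1.24+).
	for part := range strings.SplitSeq(ranges, ",") {
		fmt.Println("seq:", part)
	}
	// Eager: the full slice is allocated first; same parts, same order.
	for _, part := range strings.Split(ranges, ",") {
		fmt.Println("split:", part)
	}
}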

View File

@@ -1861,8 +1861,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// baseMove chains to the wrapped Move or simulates it by Copy+Delete
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
ctx, ci := fs.AddConfig(ctx)
ci.NameTransform = nil // ensure operations.Move does not double-transform here
var (
dest fs.Object
err error

View File

@@ -187,6 +187,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {
@@ -240,22 +241,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove, slowHash := true, false
canMove := true
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
slowHash = slowHash || u.f.Features().SlowHash
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// If any of the upstreams has SlowHash set, propagate it
features.SlowHash = slowHash
// Enable ListR when upstreams either support ListR or are local,
// but not when all upstreams are local
if features.ListR == nil {
@@ -369,6 +366,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})
@@ -635,6 +633,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var uChans []chan time.Duration
for _, u := range f.upstreams {
u := u
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
@@ -859,7 +858,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
}
return wrappedCallback(entries)
}
return listP(ctx, uRemote, wrappedCallback)
return listP(ctx, dir, wrappedCallback)
}
// ListR lists the objects and directories of the Fs starting
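The upstream := upstream and u := u shadow lines appearing in this diff are the pre-Go-1.22 loop-variable capture idiom: the range variable used to be reused across iterations, so goroutines started inside the loop could all observe the final value. Since Go 1.22 each iteration gets a fresh variable and the shadowing is redundant but harmless. A minimal sketch with illustrative values:
package main
import (
	"fmt"

	"golang.org/x/sync/errgroup"
)
func main() {
	var g errgroup.Group
	for _, u := range []string{"a", "b", "c"} {
		u := u // required on Go < 1.22; a no-op since Go 1.22
		g.Go(func() error {
			fmt.Println(u)
			return nil
		})
	}
	_ = g.Wait()
}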

View File

@@ -1,38 +0,0 @@
// Type definitions specific to Dataverse
package api
// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
Status string `json:"status"`
Data DataverseDataset `json:"data"`
}
// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}
// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
LastUpdateTime string `json:"lastUpdateTime"`
Files []DataverseFile `json:"files"`
}
// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
DirectoryLabel string `json:"directoryLabel"`
DataFile DataverseDataFile `json:"dataFile"`
}
// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
ID int64 `json:"id"`
Filename string `json:"filename"`
ContentType string `json:"contentType"`
FileSize int64 `json:"filesize"`
OriginalFileFormat string `json:"originalFileFormat"`
OriginalFileSize int64 `json:"originalFileSize"`
OriginalFileName string `json:"originalFileName"`
MD5 string `json:"md5"`
}

View File

@@ -1,33 +0,0 @@
// Type definitions specific to InvenioRDM
package api
// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
Links InvenioRecordResponseLinks `json:"links"`
}
// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
Self string `json:"self"`
}
// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
Entries []InvenioFilesResponseEntry `json:"entries"`
}
// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
Key string `json:"key"`
Checksum string `json:"checksum"`
Size int64 `json:"size"`
Updated string `json:"updated"`
MimeType string `json:"mimetype"`
Links InvenioFilesResponseEntryLinks `json:"links"`
}
// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
Content string `json:"content"`
}

View File

@@ -1,26 +0,0 @@
// Package api has general type definitions for doi
package api
// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
ResponseCode int `json:"responseCode"`
Handle string `json:"handle"`
Values []DoiResolverResponseValue `json:"values"`
}
// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
Index int `json:"index"`
Type string `json:"type"`
Data DoiResolverResponseValueData `json:"data"`
TTL int `json:"ttl"`
Timestamp string `json:"timestamp"`
}
// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
Format string `json:"format"`
Value any `json:"value"`
}
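For orientation, a minimal sketch of decoding a resolver response of the shape these types describe. The handle and URL below are illustrative, not real resolver output; decodeSample is a hypothetical helper.
package api
import "encoding/json"
const sample = `{
  "responseCode": 1,
  "handle": "10.1000/182",
  "values": [
    {"index": 1, "type": "URL",
     "data": {"format": "string", "value": "https://example.org/dataset"},
     "ttl": 86400, "timestamp": "2020-01-01T00:00:00Z"}
  ]
}`
// decodeSample unmarshals the sample above; the Value comes out as a
// string inside the `any` field.
func decodeSample() (DoiResolverResponse, error) {
	var resp DoiResolverResponse
	err := json.Unmarshal([]byte(sample), &resp)
	return resp, err
}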

View File

@@ -1,112 +0,0 @@
// Implementation for Dataverse
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
return persistentID != ""
}
// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
queryValues := resolvedURL.Query()
persistentID := queryValues.Get("persistentId")
query := url.Values{}
query.Add("persistentId", persistentID)
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})
return Dataverse, endpointURL, nil
}
// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := dp.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := dp.f.endpoint
var res *http.Response
var result api.DataverseDatasetResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
Parameters: filesURL.Query(),
}
err = dp.f.pacer.Call(func() (bool, error) {
res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
if modTimeErr != nil {
fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
for _, file := range result.Data.LatestVersion.Files {
contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
query := url.Values{}
query.Add("format", "original")
contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
entry := &Object{
fs: dp.f,
remote: path.Join(file.DirectoryLabel, file.DataFile.Filename),
contentURL: contentURL.String(),
size: file.DataFile.FileSize,
modTime: modTime,
md5: file.DataFile.MD5,
contentType: file.DataFile.ContentType,
}
if file.DataFile.OriginalFileName != "" {
entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
entry.size = file.DataFile.OriginalFileSize
entry.contentType = file.DataFile.OriginalFileFormat
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
dp.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newDataverseProvider(f *Fs) doiProvider {
return &dataverseProvider{
f: f,
}
}
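For orientation, a standalone sketch of the endpoint rewrite resolveDataverseEndpoint performs above, using an illustrative landing-page URL rather than a real dataset:
package main
import (
	"fmt"
	"net/url"
)
func main() {
	resolved, _ := url.Parse("https://dataverse.example.edu/dataset.xhtml?persistentId=doi:10.1234/ABCD")
	q := url.Values{}
	q.Add("persistentId", resolved.Query().Get("persistentId"))
	endpoint := resolved.ResolveReference(&url.URL{
		Path:     "/api/datasets/:persistentId/",
		RawQuery: q.Encode(),
	})
	// Prints https://dataverse.example.edu/api/datasets/:persistentId/?persistentId=doi%3A10.1234%2FABCD
	fmt.Println(endpoint)
}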

View File

@@ -1,649 +0,0 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/cache"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
const (
// the URL of the DOI resolver
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
doiResolverAPIURL = "https://doi.org/api"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
errorReadOnly = errors.New("doi remotes are read only")
timeUnset = time.Unix(0, 0)
)
func init() {
fsi := &fs.RegInfo{
Name: "doi",
Description: "DOI datasets",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "doi",
Help: "The DOI or the doi.org URL.",
Required: true,
}, {
Name: fs.ConfigProvider,
Help: `DOI provider.
The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
Examples: []fs.OptionExample{
{
Value: "auto",
Help: "Auto-detect provider",
},
{
Value: string(Zenodo),
Help: "Zenodo",
}, {
Value: string(Dataverse),
Help: "Dataverse",
}, {
Value: string(Invenio),
Help: "Invenio",
}},
Required: false,
Advanced: true,
}, {
Name: "doi_resolver_api_url",
Help: `The URL of the DOI resolver API to use.
The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
Defaults to "https://doi.org/api".`,
Required: false,
Advanced: true,
}},
}
fs.Register(fsi)
}
// Provider defines the type of provider hosting the DOI
type Provider string
const (
// Zenodo provider, see https://zenodo.org
Zenodo Provider = "zenodo"
// Dataverse provider, see https://dataverse.harvard.edu
Dataverse Provider = "dataverse"
// Invenio provider, see https://inveniordm.docs.cern.ch
Invenio Provider = "invenio"
)
// Options defines the configuration for this backend
type Options struct {
Doi string `config:"doi"` // The DOI, a digital identifier of an object, usually a dataset
Provider string `config:"provider"` // The DOI provider
DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}
// Fs stores the interface to the remote HTTP files
type Fs struct {
name string // name of this remote
root string // the path we are working on
provider Provider // the DOI provider
doiProvider doiProvider // the interface used to interact with the DOI provider
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
endpoint *url.URL // the main API endpoint for this remote
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the server
pacer *fs.Pacer // pacer for API calls
cache *cache.Cache // a cache for the remote metadata
}
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs // what this object is part of
remote string // the remote path
contentURL string // the URL where the contents of the file can be downloaded
size int64 // size of the object
modTime time.Time // modification time of the object
contentType string // content type of the object
md5 string // MD5 hash of the object content
}
// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
// ListEntries returns the full list of entries found at the remote, regardless of root
ListEntries(ctx context.Context) (entries []*Object, err error)
}
// Parse the input string as a DOI
// Examples:
// 10.1000/182 -> 10.1000/182
// https://doi.org/10.1000/182 -> 10.1000/182
// doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
doiURL, err := url.Parse(doi)
if err != nil {
return doi
}
if doiURL.Scheme == "doi" {
return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
}
if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
return strings.TrimLeft(doiURL.Path, "/")
}
return doi
}
// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
resolverURL := opt.DoiResolverAPIURL
if resolverURL == "" {
resolverURL = doiResolverAPIURL
}
var result api.DoiResolverResponse
params := url.Values{}
params.Add("index", "1")
opts := rest.Opts{
Method: "GET",
RootURL: resolverURL,
Path: "/handles/" + opt.Doi,
Parameters: params,
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.ResponseCode != 1 {
return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
}
resolvedURLStr := ""
for _, value := range result.Values {
if value.Type == "URL" && value.Data.Format == "string" {
valueStr, ok := value.Data.Value.(string)
if !ok {
return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
}
resolvedURLStr = valueStr
}
}
resolvedURL, err := url.Parse(resolvedURLStr)
if err != nil {
return nil, err
}
return resolvedURL, nil
}
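// An illustrative aside, not part of the original source: the call
// above boils down to a single GET against the handle API, e.g.
//
//	GET https://doi.org/api/handles/10.1000/182?index=1
//
// A minimal sketch of the same request with net/http instead of
// rclone's rest client (resolveOnce is hypothetical):
func resolveOnce(ctx context.Context, doi string) (*http.Response, error) {
	// The DOI keeps its slash unescaped in the handle API path.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet,
		"https://doi.org/api/handles/"+doi+"?index=1", nil)
	if err != nil {
		return nil, err
	}
	return http.DefaultClient.Do(req)
}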
// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
if err != nil {
return "", nil, err
}
switch opt.Provider {
case string(Dataverse):
return resolveDataverseEndpoint(resolvedURL)
case string(Invenio):
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
case string(Zenodo):
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
hostname := strings.ToLower(resolvedURL.Hostname())
if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
return resolveDataverseEndpoint(resolvedURL)
}
if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
}
if activateInvenio(ctx, srv, pacer, resolvedURL) {
return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
}
return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}
// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
if err != nil {
return false, err
}
// Update f with the new parameters
f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
f.endpoint = endpoint
f.endpointURL = endpoint.String()
f.provider = provider
f.opt.Provider = string(provider)
switch f.provider {
case Dataverse:
f.doiProvider = newDataverseProvider(f)
case Invenio, Zenodo:
f.doiProvider = newInvenioProvider(f)
default:
return false, fmt.Errorf("provider type '%s' not supported", f.provider)
}
// Determine if the root is a file
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return false, err
}
for _, entry := range entries {
if entry.remote == f.root {
isFile = true
break
}
}
return isFile, nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
root = strings.Trim(root, "/")
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
opt.Doi = parseDoi(opt.Doi)
client := fshttp.NewClient(ctx)
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
ci: ci,
srv: rest.NewClient(client),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
cache: cache.New(),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
isFile, err := f.httpConnection(ctx, opt)
if err != nil {
return nil, err
}
if isFile {
// return an error with an fs which points to the parent
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.root = newRoot
return f, fs.ErrorIsFile
}
return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
return f.name
}
// Root returns the root for the filesystem
func (f *Fs) Root() string {
return f.root
}
// String returns the URL for the filesystem
func (f *Fs) String() string {
return fmt.Sprintf("DOI %s", f.opt.Doi)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Hashes returns hash.MD5 as the only hash type supported by the remote
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return errorReadOnly
}
// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
entries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, err
}
remoteFullPath := remote
if f.root != "" {
remoteFullPath = path.Join(f.root, remote)
}
for _, entry := range entries {
if entry.Remote() == remoteFullPath {
return entry, nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fileEntries, err := f.doiProvider.ListEntries(ctx)
if err != nil {
return nil, fmt.Errorf("error listing %q: %w", dir, err)
}
fullDir := path.Join(f.root, dir)
if fullDir != "" {
fullDir += "/"
}
dirPaths := map[string]bool{}
for _, entry := range fileEntries {
// First, filter out files not in `fullDir`
if !strings.HasPrefix(entry.remote, fullDir) {
continue
}
// Then, find entries in subfolders
remotePath := entry.remote
if fullDir != "" {
remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
}
parts := strings.SplitN(remotePath, "/", 2)
if len(parts) == 1 {
newEntry := *entry
newEntry.remote = path.Join(dir, remotePath)
entries = append(entries, &newEntry)
} else {
dirPaths[path.Join(dir, parts[0])] = true
}
}
for dirPath := range dirPaths {
entry := fs.NewDir(dirPath, time.Time{})
entries = append(entries, entry)
}
return entries, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return nil, errorReadOnly
}
// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
return o.fs
}
// String returns the remote path of the file
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
return o.remote
}
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5, nil
}
// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return errorReadOnly
}
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
RootURL: o.contentURL,
Options: options,
}
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
// Handle non-compliant redirects
if res.Header.Get("Location") != "" {
newURL, err := res.Location()
if err == nil {
opts.RootURL = newURL.String()
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
}
}
return res.Body, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errorReadOnly
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
var commandHelp = []fs.CommandHelp{{
Name: "metadata",
Short: "Show metadata about the DOI.",
Long: `This command returns a JSON object with some information about the DOI.
rclone backend metadata doi:
It returns a JSON object representing metadata about the DOI.
`,
}, {
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running doi backend.
Usage Examples:
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
The option keys are named as they are in the config file.
This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.
`,
}}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
case "set":
newOpt := f.opt
err := configstruct.Set(configmap.Simple(opt), &newOpt)
if err != nil {
return nil, fmt.Errorf("reading config: %w", err)
}
_, err = f.httpConnection(ctx, &newOpt)
if err != nil {
return nil, fmt.Errorf("updating session: %w", err)
}
f.opt = newOpt
keys := []string{}
for k := range opt {
keys = append(keys, k)
}
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}
}
// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err
}
info := map[string]any{}
info["DOI"] = f.opt.Doi
info["URL"] = doiURL.String()
info["metadataURL"] = f.endpointURL
info["provider"] = f.provider
return info, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Commander = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
)

View File

@@ -1,260 +0,0 @@
package doi
import (
"context"
"crypto/md5"
"encoding/hex"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var remoteName = "TestDoi"
func TestParseDoi(t *testing.T) {
// 10.1000/182 -> 10.1000/182
doi := "10.1000/182"
parsed := parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://doi.org/10.1000/182 -> 10.1000/182
doi = "https://doi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// https://dx.doi.org/10.1000/182 -> 10.1000/182
doi = "https://dxdoi.org/10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi:10.1000/182 -> 10.1000/182
doi = "doi:10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
// doi://10.1000/182 -> 10.1000/182
doi = "doi://10.1000/182"
parsed = parseDoi(doi)
assert.Equal(t, "10.1000/182", parsed)
}
// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
mux := http.NewServeMux()
// Handle requests for resolving DOIs
mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are resolving a DOI
handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
assert.NotEmpty(t, handle)
index := r.URL.Query().Get("index")
assert.Equal(t, "1", index)
// Return the most basic response
result := api.DoiResolverResponse{
ResponseCode: 1,
Handle: handle,
Values: []api.DoiResolverResponseValue{
{
Index: 1,
Type: "URL",
Data: api.DoiResolverResponseValueData{
Format: "string",
Value: resolvedURL,
},
},
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts.URL + "/api"
}
func md5Sum(text string) string {
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
mux := http.NewServeMux()
// Handle requests for a single record
mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning data about a single record
recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
assert.NotEmpty(t, recordID)
// Return the most basic response
selfURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
selfURL = selfURL.JoinPath(r.URL.String())
result := api.InvenioRecordResponse{
Links: api.InvenioRecordResponseLinks{
Self: selfURL.String(),
},
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for listing files in a record
mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
// Return the most basic response
filesBaseURL, err := url.Parse("http://" + r.Host)
require.NoError(t, err)
filesBaseURL = filesBaseURL.JoinPath("/api/files/")
entries := []api.InvenioFilesResponseEntry{}
for filename, contents := range files {
entries = append(entries,
api.InvenioFilesResponseEntry{
Key: filename,
Checksum: md5Sum(contents),
Size: int64(len(contents)),
Updated: time.Now().UTC().Format(time.RFC3339),
MimeType: "text/plain; charset=utf-8",
Links: api.InvenioFilesResponseEntryLinks{
Content: filesBaseURL.JoinPath(filename).String(),
},
},
)
}
result := api.InvenioFilesResponse{
Entries: entries,
}
resultBytes, err := json.Marshal(result)
require.NoError(t, err)
w.Header().Add("Content-Type", "application/json")
_, err = w.Write(resultBytes)
require.NoError(t, err)
})
// Handle requests for file contents
mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
// Check that we are returning the contents of a file
filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
assert.NotEmpty(t, filename)
contents, found := files[filename]
if !found {
w.WriteHeader(404)
return
}
// Return the most basic response
_, err := w.Write([]byte(contents))
require.NoError(t, err)
})
// Make the test server
ts := httptest.NewServer(mux)
// Close the server at the end of the test
t.Cleanup(ts.Close)
return ts
}
func TestZenodoRemote(t *testing.T) {
recordID := "2600782"
doi := "10.5281/zenodo.2600782"
// The files in the dataset
files := map[string]string{
"README.md": "This is a dataset.",
"data.txt": "Some data",
}
ts := prepareMockZenodoServer(t, files)
resolvedURL := ts.URL + "/record/" + recordID
doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)
testConfig := configmap.Simple{
"type": "doi",
"doi": doi,
"provider": "zenodo",
"doi_resolver_api_url": doiResolverAPIURL,
}
f, err := NewFs(context.Background(), remoteName, "", testConfig)
require.NoError(t, err)
// Test listing the DOI files
entries, err := f.List(context.Background(), "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, len(files), len(entries))
e := entries[0]
assert.Equal(t, "README.md", e.Remote())
assert.Equal(t, int64(18), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "data.txt", e.Remote())
assert.Equal(t, int64(9), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
// Test reading the DOI files
o, err := f.NewObject(context.Background(), "README.md")
require.NoError(t, err)
assert.Equal(t, int64(18), o.Size())
md5Hash, err := o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["README.md"]), data)
do, ok := o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
o, err = f.NewObject(context.Background(), "data.txt")
require.NoError(t, err)
assert.Equal(t, int64(9), o.Size())
md5Hash, err = o.Hash(context.Background(), hash.MD5)
require.NoError(t, err)
assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
fd, err = o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, []byte(files["data.txt"]), data)
do, ok = o.(fs.MimeTyper)
require.True(t, ok)
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}

View File

@@ -1,16 +0,0 @@
// Test DOI filesystem interface
package doi
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestDoi:",
NilObject: (*Object)(nil),
})
}

View File

@@ -1,164 +0,0 @@
// Implementation for InvenioRDM
package doi
import (
"context"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)
// Returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
_, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
return err == nil
}
// Resolve the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
var res *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err = srv.Call(ctx, &opts)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
// First, attempt to grab the API URL from the headers
var linksetURL *url.URL
links := parseLinkHeader(res.Header.Get("Link"))
for _, link := range links {
if link.Rel == "linkset" && link.Type == "application/linkset+json" {
parsed, err := url.Parse(link.Href)
if err == nil {
linksetURL = parsed
break
}
}
}
if linksetURL != nil {
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "using linkset URL failed: %s", err.Error())
}
// If there is no linkset header, try to grab the record ID from the URL
recordID := ""
resURL := res.Request.URL
match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
if match != nil {
recordID = match[1]
guessedURL := res.Request.URL.ResolveReference(&url.URL{
Path: "/api/records/" + recordID,
})
endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
if err == nil {
return Invenio, endpoint, nil
}
fs.Logf(nil, "guessing the URL failed: %s", err.Error())
}
return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}
func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: resolvedURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, err
}
if result.Links.Self == "" {
return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
}
return url.Parse(result.Links.Self)
}
// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
f *Fs
}
// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
// Use the cache if populated
cachedEntries, found := ip.f.cache.GetMaybe("files")
if found {
parsedEntries, ok := cachedEntries.([]Object)
if ok {
for _, entry := range parsedEntries {
newEntry := entry
entries = append(entries, &newEntry)
}
return entries, nil
}
}
filesURL := ip.f.endpoint.JoinPath("files")
var result api.InvenioFilesResponse
opts := rest.Opts{
Method: "GET",
Path: strings.TrimLeft(filesURL.EscapedPath(), "/"),
}
err = ip.f.pacer.Call(func() (bool, error) {
res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return nil, fmt.Errorf("readDir failed: %w", err)
}
for _, file := range result.Entries {
modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
if modTimeErr != nil {
fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
modTime = timeUnset
}
entry := &Object{
fs: ip.f,
remote: file.Key,
contentURL: file.Links.Content,
size: file.Size,
modTime: modTime,
contentType: file.MimeType,
md5: strings.TrimPrefix(file.Checksum, "md5:"),
}
entries = append(entries, entry)
}
// Populate the cache
cacheEntries := []Object{}
for _, entry := range entries {
cacheEntries = append(cacheEntries, *entry)
}
ip.f.cache.Put("files", cacheEntries)
return entries, nil
}
func newInvenioProvider(f *Fs) doiProvider {
return &invenioProvider{
f: f,
}
}

View File

@@ -1,75 +0,0 @@
package doi
import (
"regexp"
"strings"
)
var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)
// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
Href string
Rel string
Type string
Extras map[string]string
}
func parseLinkHeader(header string) (links []headerLink) {
for link := range strings.SplitSeq(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {
links = append(links, *parsed)
}
}
return links
}
func parseLink(link string) (parsedLink *headerLink) {
var parts []string
for part := range strings.SplitSeq(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}
match := linkRegex.FindStringSubmatch(parts[0])
if match == nil {
return nil
}
result := &headerLink{
Href: match[1],
Extras: map[string]string{},
}
for _, keyValue := range parts[1:] {
parsed := parseKeyValue(keyValue)
if parsed != nil {
key, value := parsed[0], parsed[1]
switch strings.ToLower(key) {
case "rel":
result.Rel = value
case "type":
result.Type = value
default:
result.Extras[key] = value
}
}
}
return result
}
func parseKeyValue(keyValue string) []string {
parts := strings.SplitN(keyValue, "=", 2)
if parts[0] == "" || len(parts) < 2 {
return nil
}
match := valueRegex.FindStringSubmatch(parts[1])
if match != nil {
parts[1] = match[1]
return parts
}
return parts
}

View File

@@ -1,44 +0,0 @@
package doi
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestParseLinkHeader(t *testing.T) {
header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
links := parseLinkHeader(header)
expected := headerLink{
Href: "https://zenodo.org/api/records/15063252",
Rel: "linkset",
Type: "application/linkset+json",
Extras: map[string]string{},
}
assert.Contains(t, links, expected)
header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
links = parseLinkHeader(header)
expectedList := []headerLink{{
Href: "https://api.example.com/issues?page=2",
Rel: "prev",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=4",
Rel: "next",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=10",
Rel: "last",
Type: "",
Extras: map[string]string{},
}, {
Href: "https://api.example.com/issues?page=1",
Rel: "first",
Type: "",
Extras: map[string]string{},
}}
assert.Equal(t, links, expectedList)
}

View File

@@ -1,47 +0,0 @@
// Implementation for Zenodo
package doi
import (
"context"
"fmt"
"net/url"
"regexp"
"github.com/rclone/rclone/backend/doi/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)
// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
match := zenodoRecordRegex.FindStringSubmatch(doi)
if match == nil {
return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
}
recordID := match[1]
endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})
var result api.InvenioRecordResponse
opts := rest.Opts{
Method: "GET",
RootURL: endpointURL.String(),
}
err = pacer.Call(func() (bool, error) {
res, err := srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, res, err)
})
if err != nil {
return "", nil, err
}
endpointURL, err = url.Parse(result.Links.Self)
if err != nil {
return "", nil, err
}
return Zenodo, endpointURL, nil
}
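For orientation, a minimal sketch of the record-ID extraction the regex above performs, with an illustrative Zenodo DOI:
package main
import (
	"fmt"
	"regexp"
)
func main() {
	re := regexp.MustCompile(`zenodo[.](.+)`)
	m := re.FindStringSubmatch("10.5281/zenodo.2600782")
	fmt.Println(m[1]) // 2600782, so the endpoint becomes /api/records/2600782
}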

View File

@@ -191,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for scope := range strings.SplitSeq(scopesString, ",") {
for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
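For orientation: driveScopes expands a comma-separated scope list by prefixing each entry. A simplified standalone sketch; scopePrefix is assumed to be the usual Google OAuth base, and the empty-input default handling is omitted:
package main
import (
	"fmt"
	"strings"
)
const scopePrefix = "https://www.googleapis.com/auth/" // assumed prefix
func driveScopes(scopesString string) (scopes []string) {
	for _, scope := range strings.Split(scopesString, ",") {
		scopes = append(scopes, scopePrefix+strings.TrimSpace(scope))
	}
	return scopes
}
func main() {
	// [https://www.googleapis.com/auth/drive https://www.googleapis.com/auth/drive.metadata.readonly]
	fmt.Println(driveScopes("drive,drive.metadata.readonly"))
}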
@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
for _, extensionText := range extensionsIn {
for extension := range strings.SplitSeq(extensionText, ",") {
for _, extension := range strings.Split(extensionText, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if extension == "" {
continue
@@ -1745,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
}
var updateMetadata updateMetadataFn
if len(metadata) > 0 {
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
if err != nil {
return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
}
@@ -1776,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
}
dirID = actualID(dirID)
updateInfo := &drive.File{}
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
if err != nil {
return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
}

View File

@@ -386,6 +386,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
@@ -506,7 +507,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns {
@@ -519,6 +520,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
// merge metadata into request and user metadata
for k, v := range meta {
k, v := k, v
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)
@@ -530,9 +532,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
switch k {
case "copy-requires-writer-permission":
if isFolder {
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err
}
case "writers-can-share":
@@ -629,7 +629,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
}

View File

@@ -1446,9 +1446,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
}
}
usage = &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(used), // bytes in use
Free: fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(used)), // bytes in use
Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
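This hunk switches between raw int64 values and float64-to-int64 casts: when the quota figures arrive as float64 (the default for decoded JSON numbers), they must be converted before building fs.Usage, whose fields are *int64. A minimal sketch of that conversion; usageFromFloats is a hypothetical helper:
package main
import (
	"fmt"

	"github.com/rclone/rclone/fs"
)
func usageFromFloats(total, used float64) *fs.Usage {
	return &fs.Usage{
		Total: fs.NewUsageValue(int64(total)),
		Used:  fs.NewUsageValue(int64(used)),
		Free:  fs.NewUsageValue(int64(total - used)),
	}
}
func main() {
	u := usageFromFloats(1e9, 2.5e8)
	fmt.Println(*u.Total, *u.Used, *u.Free) // 1000000000 250000000 750000000
}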

View File

@@ -1,81 +0,0 @@
// Package api defines types for interacting with the FileLu API.
package api
import "encoding/json"
// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
FldID any `json:"fld_id"`
} `json:"result"`
}
// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}
// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
Files []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
FileCode string `json:"file_code"`
Size int64 `json:"size"`
} `json:"files"`
Folders []struct {
Name string `json:"name"`
FldID json.Number `json:"fld_id"`
Path string `json:"path"`
} `json:"folders"`
} `json:"result"`
}
// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
URL string `json:"url"`
Size int64 `json:"size"`
} `json:"result"`
}
// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result []struct {
Size string `json:"size"`
Name string `json:"name"`
FileCode string `json:"filecode"`
Hash string `json:"hash"`
Status int `json:"status"`
} `json:"result"`
}
// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
}
// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
Status int `json:"status"` // HTTP status code of the response.
Msg string `json:"msg"` // Message describing the response.
Result struct {
PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
Email string `json:"email"` // User's email address.
UType string `json:"utype"` // User type (e.g., premium or free).
Storage string `json:"storage"` // Total storage available to the user.
StorageUsed string `json:"storage_used"` // Amount of storage used.
} `json:"result"` // Nested result structure containing account details.
}

View File

@@ -1,366 +0,0 @@
// Package filelu provides an interface to the FileLu storage system.
package filelu
import (
"context"
"fmt"
"io"
"net/http"
"os"
"path"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
// Register the backend with Rclone
func init() {
fs.Register(&fs.RegInfo{
Name: "filelu",
Description: "FileLu Cloud Storage",
NewFs: NewFs,
Options: []fs.Option{{
Name: "key",
Help: "Your FileLu Rclone key from My Account",
Required: true,
Sensitive: true,
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeExclamation |
encoder.EncodeDoubleQuote |
encoder.EncodeSingleQuote |
encoder.EncodeBackQuote |
encoder.EncodeQuestion |
encoder.EncodeDollar |
encoder.EncodeColon |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeCrLf |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeLeftSpace |
encoder.EncodeLeftPeriod |
encoder.EncodeLeftTilde |
encoder.EncodeLeftCrLfHtVt |
encoder.EncodeRightPeriod |
encoder.EncodeRightCrLfHtVt |
encoder.EncodeSquareBracket |
encoder.EncodeSemicolon |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
}})
}
// Options defines the configuration for the FileLu backend
type Options struct {
Key string `config:"key"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents the FileLu file system
type Fs struct {
name string
root string
opt Options
features *fs.Features
endpoint string
pacer *pacer.Pacer
srv *rest.Client
client *http.Client
targetFile string
}
// NewFs creates a new Fs object for FileLu
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, fmt.Errorf("failed to parse config: %w", err)
}
if opt.Key == "" {
return nil, fmt.Errorf("FileLu Rclone Key is required")
}
client := fshttp.NewClient(ctx)
if strings.TrimSpace(root) == "" {
root = ""
}
root = strings.Trim(root, "/")
filename := ""
f := &Fs{
name: name,
opt: *opt,
endpoint: "https://filelu.com/rclone",
client: client,
srv: rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
pacer: pacer.New(),
targetFile: filename,
root: root,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
WriteMetadata: false,
SlowHash: true,
}).Fill(ctx, f)
rootContainer, rootDirectory := rootSplit(f.root)
if rootContainer != "" && rootDirectory != "" {
// Check to see if the (container,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.root = strings.Trim(newRoot, "/")
_, err := f.NewObject(ctx, leaf)
if err != nil {
if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f.root = strings.Trim(oldRoot, "/")
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// Mkdir to create directory on remote server.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fullPath := path.Clean(f.root + "/" + dir)
_, err := f.createFolder(ctx, fullPath)
return err
}
// About provides usage statistics for the remote
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
accountInfo, err := f.getAccountInfo(ctx)
if err != nil {
return nil, err
}
totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
if err != nil {
return nil, fmt.Errorf("failed to parse total storage: %w", err)
}
usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
if err != nil {
return nil, fmt.Errorf("failed to parse used storage: %w", err)
}
return &fs.Usage{
Total: fs.NewUsageValue(totalStorage), // Total bytes available
Used: fs.NewUsageValue(usedStorage), // Total bytes used
Free: fs.NewUsageValue(totalStorage - usedStorage),
}, nil
}
// Purge deletes the directory and all its contents
func (f *Fs) Purge(ctx context.Context, dir string) error {
fullPath := path.Join(f.root, dir)
if fullPath != "" {
fullPath = "/" + strings.Trim(fullPath, "/")
}
return f.deleteFolder(ctx, fullPath)
}
// List returns a list of files and folders
// List returns a list of files and folders for the given directory
// Compose full path for API call
fullPath := path.Join(f.root, dir)
fullPath = "/" + strings.Trim(fullPath, "/")
if fullPath == "/" {
fullPath = ""
}
var entries fs.DirEntries
result, err := f.getFolderList(ctx, fullPath)
if err != nil {
return nil, err
}
fldMap := map[string]bool{}
for _, folder := range result.Result.Folders {
fldMap[folder.FldID.String()] = true
if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
continue
}
paths := strings.Split(folder.Path, fullPath+"/")
remote := paths[0]
if len(paths) > 1 {
remote = paths[1]
}
if strings.Contains(remote, "/") {
continue
}
pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
remotePathWithoutRoot := pathsWithoutRoot[0]
if len(pathsWithoutRoot) > 1 {
remotePathWithoutRoot = pathsWithoutRoot[1]
}
remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
}
for _, file := range result.Result.Files {
if _, ok := fldMap[file.FldID.String()]; ok {
continue
}
remote := path.Join(dir, file.Name)
// trim leading slashes
remote = strings.TrimPrefix(remote, "/")
obj := &Object{
fs: f,
remote: remote,
size: file.Size,
modTime: time.Now(),
}
entries = append(entries, obj)
}
return entries, nil
}
// Put uploads a file directly to the destination folder in the FileLu storage system.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
err := f.uploadFile(ctx, in, src.Remote())
if err != nil {
return nil, err
}
newObject := &Object{
fs: f,
remote: src.Remote(),
size: src.Size(),
modTime: src.ModTime(ctx),
}
fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
return newObject, nil
}
// Move moves the file to the specified location
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {
if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
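// A destination starting with "/" or containing ":\" is treated as a local
// filesystem path: the object is downloaded to disk and the source removed,
// rather than performing a server-side move.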
dir := path.Dir(destinationPath)
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, fmt.Errorf("failed to create destination directory: %w", err)
}
reader, err := src.Open(ctx)
if err != nil {
return nil, fmt.Errorf("failed to open source file: %w", err)
}
defer func() {
if err := reader.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
dest, err := os.Create(destinationPath)
if err != nil {
return nil, fmt.Errorf("failed to create destination file: %w", err)
}
defer func() {
if err := dest.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
if _, err := io.Copy(dest, reader); err != nil {
return nil, fmt.Errorf("failed to copy file content: %w", err)
}
if err := src.Remove(ctx); err != nil {
return nil, fmt.Errorf("failed to remove source file: %w", err)
}
return nil, nil
}
reader, err := src.Open(ctx)
if err != nil {
return nil, fmt.Errorf("failed to open source object: %w", err)
}
defer func() {
if err := reader.Close(); err != nil {
fs.Logf(nil, "Failed to close file body: %v", err)
}
}()
err = f.uploadFile(ctx, reader, destinationPath)
if err != nil {
return nil, fmt.Errorf("failed to upload file to destination: %w", err)
}
if err := src.Remove(ctx); err != nil {
return nil, fmt.Errorf("failed to delete source file: %w", err)
}
return &Object{
fs: f,
remote: destinationPath,
size: src.Size(),
modTime: src.ModTime(ctx),
}, nil
}
// Rmdir removes a directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
fullPath := path.Join(f.root, dir)
if fullPath != "" {
fullPath = "/" + strings.Trim(fullPath, "/")
}
// Step 1: Check if folder is empty
listResp, err := f.getFolderList(ctx, fullPath)
if err != nil {
return err
}
if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
}
// Step 2: Delete the folder
return f.deleteFolder(ctx, fullPath)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -1,324 +0,0 @@
package filelu
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/rclone/rclone/backend/filelu/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/rest"
)
// createFolder creates a folder at the specified path.
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
encodedDir := f.fromStandardPath(dirPath)
apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(encodedDir),
url.QueryEscape(f.opt.Key),
)
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
var resp *http.Response
result := api.CreateFolderResponse{}
err = f.pacer.Call(func() (bool, error) {
var innerErr error
resp, innerErr = f.client.Do(req)
return fserrors.ShouldRetry(innerErr), innerErr
})
if err != nil {
return nil, fmt.Errorf("request failed: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
err = json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return nil, fmt.Errorf("error: %s", result.Msg)
}
fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
return &result, nil
}
// getFolderList lists both files and folders in a directory.
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
encodedDir := f.fromStandardPath(path)
apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(encodedDir),
url.QueryEscape(f.opt.Key),
)
var body []byte
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err = io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("error reading response body: %w", err)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return nil, err
}
var response api.FolderListResponse
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if response.Status != 200 {
if strings.Contains(response.Msg, "Folder not found") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("API error: %s", response.Msg)
}
for index := range response.Result.Folders {
response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
}
for index := range response.Result.Files {
response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
}
return &response, nil
}
// deleteFolder deletes a folder at the specified path.
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
fullPath = f.fromStandardPath(fullPath)
deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
f.endpoint,
url.QueryEscape(fullPath),
url.QueryEscape(f.opt.Key),
)
delResp := api.DeleteFolderResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
if err != nil {
return false, err
}
resp, err := f.client.Do(req)
if err != nil {
return fserrors.ShouldRetry(err), err
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err := io.ReadAll(resp.Body)
if err != nil {
return false, err
}
if err := json.Unmarshal(body, &delResp); err != nil {
return false, fmt.Errorf("error decoding delete response: %w", err)
}
if delResp.Status != 200 {
return false, fmt.Errorf("delete error: %s", delResp.Msg)
}
return false, nil
})
if err != nil {
return err
}
fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
return nil
}
// getDirectLink fetches the direct download link and size for a file stored on FileLu.
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
filePath = f.fromStandardPath(filePath)
apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
f.endpoint,
url.QueryEscape(filePath),
url.QueryEscape(f.opt.Key),
)
result := api.FileDirectLinkResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", 0, err
}
return result.Result.URL, result.Result.Size, nil
}
// deleteFile deletes a file based on filePath
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
filePath = f.fromStandardPath(filePath)
apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
f.endpoint,
url.QueryEscape(filePath),
url.QueryEscape(f.opt.Key),
)
result := api.DeleteFileResponse{}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
return err
}
// getAccountInfo retrieves account information
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
opts := rest.Opts{
Method: "GET",
Path: "/account/info",
Parameters: url.Values{
"key": {f.opt.Key},
},
}
var result api.AccountInfoResponse
err := f.pacer.Call(func() (bool, error) {
_, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
return fserrors.ShouldRetry(callErr), callErr
})
if err != nil {
return nil, err
}
if result.Status != 200 {
return nil, fmt.Errorf("error: %s", result.Msg)
}
return &result, nil
}
// getFileInfo retrieves file information based on file code
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
u, _ := url.Parse(f.endpoint + "/file/info2")
q := u.Query()
q.Set("file_code", fileCode)
q.Set("key", f.opt.Key)
u.RawQuery = q.Encode()
apiURL := u.String()
var body []byte
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
body, err = io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("error reading response body: %w", err)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return nil, err
}
result := api.FileInfoResponse{}
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
return nil, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 || len(result.Result) == 0 {
return nil, fs.ErrorObjectNotFound
}
return &result, nil
}


@@ -1,193 +0,0 @@
package filelu
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"path"
"strings"
"github.com/rclone/rclone/fs"
)
// uploadFile uploads a file to FileLu
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
directory := path.Dir(fileFullPath)
fileName := path.Base(fileFullPath)
if directory == "." {
directory = ""
}
destinationFolderPath := path.Join(f.root, directory)
if destinationFolderPath != "" {
destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
}
existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
if err != nil {
if errors.Is(err, fs.ErrorDirNotFound) {
err = f.Mkdir(ctx, path.Dir(fileFullPath))
if err != nil {
return fmt.Errorf("failed to create directory: %w", err)
}
} else {
return fmt.Errorf("failed to list existing files: %w", err)
}
}
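// Remove any existing file with the same remote name first, as a FileLu
// upload does not appear to overwrite in place.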
for _, entry := range existingEntries {
if entry.Remote() == fileFullPath {
_, ok := entry.(fs.Object)
if !ok {
continue
}
// If the file exists but is different, remove it
filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
err = f.deleteFile(ctx, filePath)
if err != nil {
return fmt.Errorf("failed to delete existing file: %w", err)
}
}
}
uploadURL, sessID, err := f.getUploadServer(ctx)
if err != nil {
return fmt.Errorf("failed to retrieve upload server: %w", err)
}
// The returned fileCode isn't needed here, so only check the error
if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
return fmt.Errorf("failed to upload file: %w", err)
}
return nil
}
// getUploadServer gets the upload server URL with proper key authentication
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))
var result struct {
Status int `json:"status"`
SessID string `json:"sess_id"`
Result string `json:"result"`
Msg string `json:"msg"`
}
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request: %w", err)
}
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("error decoding response: %w", err)
}
if result.Status != 200 {
return false, fmt.Errorf("API error: %s", result.Msg)
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", "", err
}
return result.Result, result.SessID, nil
}
// uploadFileWithDestination uploads a file directly to a specified folder using a file content reader.
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
destinationPath := f.fromStandardPath(dirPath)
encodedFileName := f.fromStandardPath(fileName)
pr, pw := io.Pipe()
writer := multipart.NewWriter(pw)
isDeletionRequired := false
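// Stream the multipart body through a pipe so the file content is never
// buffered in memory. isDeletionRequired marks that some content reached the
// server before a failure, so a partial remote file may need cleaning up.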
go func() {
defer func() {
if err := pw.Close(); err != nil {
fs.Logf(nil, "Failed to close: %v", err)
}
}()
_ = writer.WriteField("sess_id", sessID)
_ = writer.WriteField("utype", "prem")
_ = writer.WriteField("fld_path", destinationPath)
part, err := writer.CreateFormFile("file_0", encodedFileName)
if err != nil {
pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
return
}
if _, err := io.Copy(part, fileContent); err != nil {
isDeletionRequired = true
pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
return
}
if err := writer.Close(); err != nil {
pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
}
}()
var fileCode string
err := f.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
if err != nil {
return false, fmt.Errorf("failed to create upload request: %w", err)
}
req.Header.Set("Content-Type", writer.FormDataContentType())
resp, err := f.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
}
defer respBodyClose(resp.Body)
var result []struct {
FileCode string `json:"file_code"`
FileStatus string `json:"file_status"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, fmt.Errorf("failed to parse upload response: %w", err)
}
// Guard against an empty response before indexing result[0]
if len(result) == 0 {
return false, errors.New("upload failed: empty response from server")
}
if result[0].FileStatus != "OK" {
return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
}
fileCode = result[0].FileCode
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil && isDeletionRequired {
// Attempt to delete the file if upload fails
_ = f.deleteFile(ctx, destinationPath+"/"+fileName)
}
return fileCode, err
}
// respBodyClose closes a response body, logging any error.
func respBodyClose(responseBody io.Closer) {
if cerr := responseBody.Close(); cerr != nil {
fs.Logf(nil, "Failed to close response body: %v", cerr)
}
}


@@ -1,112 +0,0 @@
package filelu
import (
"context"
"errors"
"fmt"
"path"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
)
// errFileNotFound represents a file not found error
var errFileNotFound = errors.New("file not found")
// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
// Prepare parent directory
parentDir := path.Dir(filePath)
// Call List to get all the files
result, err := f.getFolderList(ctx, parentDir)
if err != nil {
return "", err
}
for _, file := range result.Result.Files {
filePathFromServer := parentDir + "/" + file.Name
if parentDir == "/" {
filePathFromServer = "/" + file.Name
}
if filePath == filePathFromServer {
return file.FileCode, nil
}
}
return "", errFileNotFound
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
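// fromStandardPath converts a standard rclone path to the FileLu encoding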
func (f *Fs) fromStandardPath(remote string) string {
return f.opt.Enc.FromStandardPath(remote)
}
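// toStandardPath converts a FileLu path back to the standard rclone encoding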
func (f *Fs) toStandardPath(remote string) string {
return f.opt.Enc.ToStandardPath(remote)
}
// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}
// Name returns the remote name
func (f *Fs) Name() string {
return f.name
}
// Root returns the root path
func (f *Fs) Root() string {
return f.root
}
// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
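// String returns a description of the Fs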
func (f *Fs) String() string {
return fmt.Sprintf("FileLu root '%s'", f.root)
}
// isFileCode checks if a string looks like a file code
func isFileCode(s string) bool {
if len(s) != 12 {
return false
}
for _, c := range s {
if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
return false
}
}
return true
}
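// shouldRetry reports whether an error from the FileLu API merits a retry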
func shouldRetry(err error) bool {
return fserrors.ShouldRetry(err)
}
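// shouldRetryHTTP reports whether an HTTP status code merits a retry:
// 429 (rate limited) or any 5xx server error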
func shouldRetryHTTP(code int) bool {
return code == 429 || code >= 500
}
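// rootSplit splits an absolute path into its first component and the rest,
// e.g. rootSplit("bucket/a/b") returns ("bucket", "a/b")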
func rootSplit(absPath string) (bucket, bucketPath string) {
// No bucket
if absPath == "" {
return "", ""
}
slash := strings.IndexRune(absPath, '/')
// Bucket but no path
if slash < 0 {
return absPath, ""
}
return absPath[:slash], absPath[slash+1:]
}


@@ -1,259 +0,0 @@
package filelu
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// Object describes a FileLu object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
}
// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
filePath := path.Join(f.root, remote)
filePath = "/" + strings.Trim(filePath, "/")
// Get File code
fileCode, err := f.getFileCode(ctx, filePath)
if err != nil {
return nil, fs.ErrorObjectNotFound
}
// Get File info
fileInfos, err := f.getFileInfo(ctx, fileCode)
if err != nil {
return nil, fmt.Errorf("failed to get file info: %w", err)
}
fileInfo := fileInfos.Result[0]
size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)
return &Object{
fs: f,
remote: remote,
size: size,
modTime: time.Now(),
}, nil
}
// Open opens the object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
filePath := path.Join(o.fs.root, o.remote)
// Get direct link
directLink, size, err := o.fs.getDirectLink(ctx, filePath)
if err != nil {
return nil, fmt.Errorf("failed to get direct link: %w", err)
}
o.size = size
// Offset and Count for range download
var offset int64
var count int64
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
case *fs.SeekOption:
offset = x.Offset
count = o.size
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var reader io.ReadCloser
err = o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
if err != nil {
return false, fmt.Errorf("failed to create download request: %w", err)
}
resp, err := o.fs.client.Do(req)
if err != nil {
return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
}
if resp.StatusCode != http.StatusOK {
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
}
// Wrap the response body to handle offset and count
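// Note that this buffers the whole object in memory and then slices it, so
// ranged reads of large files are expensive with this backend.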
currentContents, err := io.ReadAll(resp.Body)
if err != nil {
return false, fmt.Errorf("failed to read response body: %w", err)
}
if offset > 0 {
if offset > int64(len(currentContents)) {
return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
}
currentContents = currentContents[offset:]
}
if count > 0 && count < int64(len(currentContents)) {
currentContents = currentContents[:count]
}
reader = io.NopCloser(bytes.NewReader(currentContents))
return false, nil
})
if err != nil {
return nil, err
}
return reader, nil
}
// Update updates the object with new data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if src.Size() <= 0 {
return fs.ErrorCantUploadEmptyFiles
}
err := o.fs.uploadFile(ctx, in, o.remote)
if err != nil {
return fmt.Errorf("failed to upload file: %w", err)
}
o.size = src.Size()
return nil
}
// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")
err := o.fs.deleteFile(ctx, fullPath)
if err != nil {
return err
}
fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
return nil
}
// Hash returns the MD5 hash of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
var fileCode string
if isFileCode(o.fs.root) {
fileCode = o.fs.root
} else {
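// Otherwise scan the remote name for a parenthesised 12-character file
// code, e.g. "file(abc123def456).txt"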
matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
for _, match := range matches {
if len(match) > 1 && len(match[1]) == 12 {
fileCode = match[1]
break
}
}
}
if fileCode == "" {
return "", fmt.Errorf("no valid file code found in the remote path")
}
apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))
var result struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result []struct {
Hash string `json:"hash"`
} `json:"result"`
}
err := o.fs.pacer.Call(func() (bool, error) {
req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
if err != nil {
return false, err
}
resp, err := o.fs.client.Do(req)
if err != nil {
return shouldRetry(err), err
}
defer func() {
if err := resp.Body.Close(); err != nil {
fs.Logf(nil, "Failed to close response body: %v", err)
}
}()
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return false, err
}
return shouldRetryHTTP(resp.StatusCode), nil
})
if err != nil {
return "", err
}
if result.Status != 200 || len(result.Result) == 0 {
return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
}
return result.Result[0].Hash, nil
}
// String returns a string representation of the object
func (o *Object) String() string {
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the object
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Storable indicates whether the object is storable
func (o *Object) Storable() bool {
return true
}


@@ -1,16 +0,0 @@
package filelu_test
import (
"testing"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFileLu:",
NilObject: nil,
SkipInvalidUTF8: true,
})
}


@@ -1,15 +0,0 @@
package filelu
import (
"fmt"
)
// parseStorageToBytes converts a storage value in GiB (e.g., "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
var gb float64
_, err := fmt.Sscanf(storage, "%f", &gb)
if err != nil {
return 0, fmt.Errorf("failed to parse storage: %w", err)
}
return int64(gb * 1024 * 1024 * 1024), nil
}
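// For example parseStorageToBytes("10") returns 10737418240 (10 GiB).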


@@ -9,7 +9,6 @@ import (
"io"
"net"
"net/textproto"
"net/url"
"path"
"runtime"
"strings"
@@ -163,16 +162,6 @@ Enabled by default. Use 0 to disable.`,
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false,
Advanced: true,
}, {
Name: "allow_insecure_tls_ciphers",
Help: `Allow insecure TLS ciphers
Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:
- TLS_RSA_WITH_AES_128_GCM_SHA256
`,
Default: false,
Advanced: true,
}, {
Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.",
@@ -196,14 +185,6 @@ Supports the format user:pass@host:port, user@host:port, host:port.
Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "http_proxy",
Default: "",
Help: `URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
Advanced: true,
}, {
@@ -246,30 +227,28 @@ a write only folder.
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
AllowInsecureTLSCiphers bool `config:"allow_insecure_tls_ciphers"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
}
// Fs represents a remote FTP server
@@ -283,12 +262,10 @@ type Fs struct {
user string
pass string
dialAddr string
tlsConf *tls.Config // default TLS client config
poolMu sync.Mutex
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
proxyURL *url.URL // address of HTTP proxy read from environment
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
@@ -409,14 +386,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
if f.tlsConf != nil {
tlsConfig = f.tlsConf.Clone()
} else {
tlsConfig = new(tls.Config)
}
tlsConfig.ServerName = f.opt.Host
if f.opt.SkipVerifyTLSCert {
tlsConfig.InsecureSkipVerify = true
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
@@ -424,14 +396,6 @@ func (f *Fs) tlsConfig() *tls.Config {
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
if f.opt.AllowInsecureTLSCiphers {
var ids []uint16
// Read default ciphers
for _, cs := range tls.CipherSuites() {
ids = append(ids, cs.ID)
}
tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
}
}
return tlsConfig
}
@@ -449,26 +413,11 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
if err != nil {
fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err)
} else {
fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err)
}
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else if f.proxyURL != nil {
// We need to make the onward connection to f.opt.Host. However the FTP
// library sets the host to the proxy IP after using EPSV or PASV so we need
// to correct that here.
var dialPort string
_, dialPort, err = net.SplitHostPort(address)
if err != nil {
return nil, err
}
dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
@@ -677,20 +626,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
tlsConf: fshttp.NewTransport(ctx).TLSClientConfig,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
PartialUploads: true,
}).Fill(ctx, f)
// get proxy URL if set
if opt.HTTPProxy != "" {
proxyURL, err := url.Parse(opt.HTTPProxy)
if err != nil {
return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
}
f.proxyURL = proxyURL
}
// set the pool drainer timer going
if f.opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })


@@ -52,7 +52,7 @@ func (f *Fs) testUploadTimeout(t *testing.T) {
ci.Timeout = saveTimeout
}()
ci.LowLevelRetries = 1
ci.Timeout = fs.Duration(idleTimeout)
ci.Timeout = idleTimeout
upload := func(concurrency int, shutTimeout time.Duration) (obj fs.Object, err error) {
fixFs := deriveFs(ctx, t, f, settings{


@@ -194,9 +194,33 @@ type DeleteResponse struct {
Data map[string]Error
}
// DirectUploadURL returns the direct upload URL for Gofile
func DirectUploadURL() string {
return "https://upload.gofile.io/uploadfile"
// Server is an upload server
type Server struct {
Name string `json:"name"`
Zone string `json:"zone"`
}
// String returns a string representation of the Server
func (s *Server) String() string {
return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}
// Root returns the root URL for the server
func (s *Server) Root() string {
return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}
// URL returns the upload URL for the server
func (s *Server) URL() string {
return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}
// ServersResponse is the output from /servers
type ServersResponse struct {
Error
Data struct {
Servers []Server `json:"servers"`
} `json:"data"`
}
// UploadResponse is returned by POST /contents/uploadfile


@@ -8,11 +8,13 @@ import (
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/gofile/api"
@@ -35,8 +37,10 @@ const (
maxSleep = 20 * time.Second
decayConstant = 1 // bigger for slower decay, exponential
rootURL = "https://api.gofile.io"
rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error
maxDepth = 4 // in ListR recursive list this deep (maximum is 16)
serversExpiry = 60 * time.Second // check for new upload servers this often
serversActive = 2 // choose this many closest upload servers to use
rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error
maxDepth = 4 // in ListR recursive list this deep (maximum is 16)
)
/*
@@ -124,13 +128,16 @@ type Options struct {
// Fs represents a remote gofile
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
serversMu *sync.Mutex // protect the servers info below
servers []api.Server // upload servers we can use
serversChecked time.Time // time the servers were refreshed
}
// Object describes a gofile object
@@ -304,11 +311,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
serversMu: new(sync.Mutex),
}
f.features = (&fs.Features{
CaseInsensitive: false,
@@ -427,6 +435,98 @@ func (f *Fs) readRootFolderID(ctx context.Context, m configmap.Mapper) (err erro
return nil
}
// Find the top n servers measured by response time
func (f *Fs) bestServers(ctx context.Context, servers []api.Server, n int) (newServers []api.Server) {
ctx, cancel := context.WithDeadline(ctx, time.Now().Add(10*time.Second))
defer cancel()
if n > len(servers) {
n = len(servers)
}
results := make(chan int, len(servers))
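// The channel is buffered to len(servers) so goroutines that finish after
// we stop reading (once n servers have answered) never block or leak.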
// Test how long the servers take to respond
for i := range servers {
i := i // for closure
go func() {
opts := rest.Opts{
Method: "GET",
RootURL: servers[i].Root(),
}
var result api.UploadServerStatus
start := time.Now()
_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
ping := time.Since(start)
err = result.Err(err)
if err != nil {
results <- -1 // send a -ve number on error
return
}
fs.Debugf(nil, "Upload server %v responded in %v", &servers[i], ping)
results <- i
}()
}
// Wait for n servers to respond
newServers = make([]api.Server, 0, n)
for range servers {
i := <-results
if i >= 0 {
newServers = append(newServers, servers[i])
}
if len(newServers) >= n {
break
}
}
return newServers
}
// Clear all the upload servers - call on an error
func (f *Fs) clearServers() {
f.serversMu.Lock()
defer f.serversMu.Unlock()
fs.Debugf(f, "Clearing upload servers")
f.servers = nil
}
// Gets an upload server
func (f *Fs) getServer(ctx context.Context) (server *api.Server, err error) {
f.serversMu.Lock()
defer f.serversMu.Unlock()
if len(f.servers) == 0 || time.Since(f.serversChecked) >= serversExpiry {
opts := rest.Opts{
Method: "GET",
Path: "/servers",
}
var result api.ServersResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err = result.Err(err); err != nil {
if len(f.servers) == 0 {
return nil, fmt.Errorf("failed to read upload servers: %w", err)
}
fs.Errorf(f, "failed to read new upload servers: %v", err)
} else {
// Find the top servers measured by response time
f.servers = f.bestServers(ctx, result.Data.Servers, serversActive)
f.serversChecked = time.Now()
}
}
if len(f.servers) == 0 {
return nil, errors.New("no upload servers found")
}
// Pick a server at random since we've already found the top ones
i := rand.Intn(len(f.servers))
return &f.servers[i], nil
}
// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
if f.root == "" {
@@ -1426,6 +1526,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Find an upload server
server, err := o.fs.getServer(ctx)
if err != nil {
return err
}
fs.Debugf(o, "Using upload server %v", server)
// If the file exists, delete it after a successful upload
if o.id != "" {
id := o.id
@@ -1454,7 +1561,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
},
MultipartContentName: "file",
MultipartFileName: o.fs.opt.Enc.FromStandardName(leaf),
RootURL: api.DirectUploadURL(),
RootURL: server.URL(),
Options: options,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1462,6 +1569,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return shouldRetry(ctx, resp, err)
})
if err = result.Err(err); err != nil {
if isAPIErr(err, "error-freespace") {
fs.Errorf(o, "Upload server out of space - need to retry upload")
}
o.fs.clearServers()
return fmt.Errorf("failed to upload file: %w", err)
}
return o.setMetaData(&result.Data)


@@ -483,9 +483,6 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
bucketPath = bucketPath[:len(bucketPath)-1]
}
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -715,7 +712,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
continue
}
// process directory markers as directories
remote, _ = strings.CutSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
}
remote = remote[len(prefix):]
if addBucket {
@@ -760,7 +757,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -768,16 +765,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return err
return entries, err
}
// listBuckets lists the buckets
@@ -820,46 +817,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -994,7 +959,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote, _ = strings.CutSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
@@ -1494,7 +1459,6 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)


@@ -43,7 +43,6 @@ var (
errAlbumDelete = errors.New("google photos API does not implement deleting albums")
errRemove = errors.New("google photos API only implements removing files from albums")
errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created")
errReadOnly = errors.New("can't upload files in read only mode")
)
const (
@@ -53,31 +52,19 @@ const (
listChunks = 100 // chunk size to read directory listings
albumChunks = 50 // chunk size to read album listings
minSleep = 10 * time.Millisecond
scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
)
var (
// scopes needed for read write access
scopesReadWrite = []string{
"openid",
"profile",
scopeAppendOnly,
scopeReadOnly,
scopeReadWrite,
}
// scopes needed for read only access
scopesReadOnly = []string{
"openid",
"profile",
scopeReadOnly,
}
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: scopesReadWrite,
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
ClientID: rcloneClientID,
@@ -113,26 +100,20 @@ func init() {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes = scopesReadOnly
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else {
oauthConfig.Scopes = scopesReadWrite
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
}
return oauthutil.ConfigOut("warning1", &oauthutil.Options{
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning1":
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning2", true, "config_warning", `Warning
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning2":
// Warn the user that rclone can no longer download photos it didnt upload from google photos
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: Due to Google policy changes rclone can now only download photos it uploaded.`)
case "warning_done":
return nil, nil
}
@@ -352,7 +333,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
baseClient := fshttp.NewClient(ctx)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, fmt.Errorf("failed to configure google photos: %w", err)
return nil, fmt.Errorf("failed to configure Box: %w", err)
}
root = strings.Trim(path.Clean(root), "/")
@@ -1139,9 +1120,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
if !album.IsWriteable {
if o.fs.opt.ReadOnly {
return errReadOnly
}
return errOwnAlbums
}


@@ -371,9 +371,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(info.Capacity),
Used: fs.NewUsageValue(info.Used),
Free: fs.NewUsageValue(info.Remaining),
Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(int64(info.Remaining)),
}, nil
}


@@ -252,14 +252,18 @@ func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.Op
}
resp, err := d.icloud.srv.Call(ctx, opts)
// icloud has some weird http codes
if err != nil && resp != nil && resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
if err != nil {
// icloud has some weird http codes
if resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
}
return resp, err
}
return resp, err
return d.icloud.srv.Call(ctx, opts)
}
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.


@@ -421,9 +421,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
return uploadFile(ctx, f, in, src.Remote(), options...)
}
@@ -662,9 +659,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
if src.Size() == 0 {
return fs.ErrorCantUploadEmptyFiles
}
srcRemote := o.Remote()
@@ -676,7 +670,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var resp *client.UploadResult
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
err = o.fs.pacer.Call(func() (bool, error) {
var res *http.Response
res, resp, err = o.fs.ik.Upload(ctx, in, client.UploadParam{
FileName: fileName,
@@ -731,7 +725,7 @@ func uploadFile(ctx context.Context, f *Fs, in io.Reader, srcRemote string, opti
UseUniqueFileName := new(bool)
*UseUniqueFileName = false
err := f.pacer.CallNoRetry(func() (bool, error) {
err := f.pacer.Call(func() (bool, error) {
var res *http.Response
var err error
res, _, err = f.ik.Upload(ctx, in, client.UploadParam{
@@ -800,10 +794,35 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
return metadata, nil
}
// Copy src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
return nil, fs.ErrorCantMove
}
file, err := srcObj.Open(ctx)
if err != nil {
return nil, err
}
return uploadFile(ctx, f, file, remote)
}
// Check the interfaces are satisfied.
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.Copier = &Fs{}
)


@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", err
}
bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
}
// Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
"x-archive-auto-make-bucket": "1",
"x-archive-queue-derive": "0",
"x-archive-keep-old-version": "0",
"x-amz-copy-source": rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
"x-amz-copy-source": quotePath(path.Join("/", srcBucket, srcPath)),
"x-amz-metadata-directive": "COPY",
"x-archive-filemeta-sha1": srcObj.sha1,
"x-archive-filemeta-md5": srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{
Method: "GET",
Path: path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
Options: optionsFixed,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,6 +1334,16 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
for _, v := range seg {
newValues = append(newValues, url.PathEscape(v))
}
return strings.Join(newValues, "/")
}
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}


@@ -461,7 +461,7 @@ func translateErrorsDir(err error) error {
return err
}
// translateErrorsObject translates Koofr errors to rclone errors (for an object operation)
// translatesErrorsObject translates Koofr errors to rclone errors (for an object operation)
func translateErrorsObject(err error) error {
switch err := err.(type) {
case httpclient.InvalidStatusError:


@@ -617,36 +617,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
case 1:
// upload file using link from first step
var res *http.Response
var location string
// Check to see if we are being redirected
opts := &rest.Opts{
Method: "HEAD",
RootURL: getFirstStepResult.Data.SignURL,
Options: options,
NoRedirect: true,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, opts)
return o.fs.shouldRetry(ctx, res, err)
})
if res != nil {
location = res.Header.Get("Location")
if location != "" {
// set the URL to the new Location
opts.RootURL = location
err = nil
}
}
if err != nil {
return fmt.Errorf("head upload URL: %w", err)
}
file := io.MultiReader(bytes.NewReader(first10mBytes), in)
opts.Method = "PUT"
opts.Body = file
opts.ContentLength = &size
opts := &rest.Opts{
Method: "PUT",
RootURL: getFirstStepResult.Data.SignURL,
Options: options,
Body: file,
ContentLength: &size,
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
res, err = o.fs.srv.Call(ctx, opts)


@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
iofs "io/fs"
"os"
"path"
"path/filepath"
@@ -306,12 +305,6 @@ only useful for reading.
Help: "The last status change time.",
}},
},
{
Name: "hashes",
Help: `Comma separated list of supported checksum types.`,
Default: fs.CommaSepList{},
Advanced: true,
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -338,7 +331,6 @@ type Options struct {
NoSparse bool `config:"no_sparse"`
NoSetModTime bool `config:"no_set_modtime"`
TimeType timeType `config:"time_type"`
Hashes fs.CommaSepList `config:"hashes"`
Enc encoder.MultiEncoder `config:"encoding"`
NoClone bool `config:"no_clone"`
}
@@ -672,12 +664,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
symlinkFlag := os.ModeSymlink
if runtime.GOOS == "windows" {
symlinkFlag |= os.ModeIrregular
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&symlinkFlag) != 0 {
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
@@ -699,13 +687,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&symlinkFlag) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := f.newDirectory(newRemote, fi)
entries = append(entries, d)
}
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&symlinkFlag != 0 {
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += fs.LinkSuffix
}
// Don't include non directory if not included
@@ -842,13 +830,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
err := os.Remove(localPath)
if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
if os.Chmod(localPath, 0o600) == nil {
err = os.Remove(localPath)
}
}
return err
return os.Remove(localPath)
}
// Precision of the file system
@@ -1039,19 +1021,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if len(f.opt.Hashes) > 0 {
// Return only configured hashes.
// Note: Could have used hash.SupportOnly to limit supported hashes for all hash related features.
var supported hash.Set
for _, hashName := range f.opt.Hashes {
var ht hash.Type
if err := ht.Set(hashName); err != nil {
fs.Infof(nil, "Invalid token %q in hash string %q", hashName, f.opt.Hashes.String())
}
supported.Add(ht)
}
return supported
}
return hash.Supported()
}
@@ -1121,10 +1090,6 @@ func (o *Object) Remote() string {
// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
if r == hash.None {
return "", nil
}
// Check that the underlying file hasn't changed
o.fs.objectMetaMu.RLock()
oldtime := o.modTime
@@ -1232,15 +1197,7 @@ func (o *Object) Storable() bool {
o.fs.objectMetaMu.RLock()
mode := o.mode
o.fs.objectMetaMu.RUnlock()
// On Windows items with os.ModeIrregular are likely Junction
// points so we treat them as symlinks for the purpose of ignoring them.
// https://github.com/golang/go/issues/73827
symlinkFlag := os.ModeSymlink
if runtime.GOOS == "windows" {
symlinkFlag |= os.ModeIrregular
}
if mode&symlinkFlag != 0 && !o.fs.opt.TranslateSymlinks {
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
if !o.fs.opt.SkipSymlinks {
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
}
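
The same mask-building idea appears in the List hunk earlier; a hedged standalone sketch of the helper it amounts to, assuming only the standard library:

package main

import (
	"fmt"
	"os"
	"runtime"
)

// symlinkLike reports whether mode looks like a symlink, additionally
// treating os.ModeIrregular as symlink-like on Windows, where junction
// points can be reported that way (golang/go#73827, cited in the hunk).
func symlinkLike(mode os.FileMode) bool {
	mask := os.ModeSymlink
	if runtime.GOOS == "windows" {
		mask |= os.ModeIrregular
	}
	return mode&mask != 0
}

func main() {
	fi, err := os.Lstat(".") // illustrative path
	if err != nil {
		panic(err)
	}
	fmt.Println(symlinkLike(fi.Mode()))
}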


@@ -204,23 +204,6 @@ func TestSymlinkError(t *testing.T) {
assert.Equal(t, errLinksAndCopyLinks, err)
}
func TestHashWithTypeNone(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
const filePath = "file.txt"
r.WriteFile(filePath, "content", time.Now())
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
h, err := o.Hash(ctx, hash.None)
require.Empty(t, h)
require.NoError(t, err)
}
// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
@@ -334,7 +317,7 @@ func TestMetadata(t *testing.T) {
func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
ctx := context.Background()
whenRFC := when.Local().Format(time.RFC3339Nano)
whenRFC := when.Format(time.RFC3339Nano)
const dayLength = len("2001-01-01")
f := r.Flocal.(*Fs)


@@ -1,40 +0,0 @@
//go:build windows
package local
import (
"context"
"path/filepath"
"runtime"
"syscall"
"testing"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
// Microsoft docs indicate that "This attribute is not honored on directories."
// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
// and https://github.com/golang/go/issues/26295
func TestRmdirWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skipf("windows only")
}
r := fstest.NewRun(t)
defer r.Finalise()
err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
require.NoError(t, err)
ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
require.NoError(t, err)
err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
require.NoError(t, err)
err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
assert.NoError(t, err)
}


@@ -400,7 +400,7 @@ type quirks struct {
}
func (q *quirks) parseQuirks(option string) {
for flag := range strings.SplitSeq(option, ",") {
for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
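
Both loop forms in this hunk walk the same comma-separated flags; a hedged standalone comparison (strings.SplitSeq is the Go 1.24 iterator variant, which avoids allocating the intermediate slice; the option value is illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	const option = "binlist, unknownflag" // illustrative values

	// Iterator form (Go 1.24+): yields fields without building a slice.
	for flag := range strings.SplitSeq(option, ",") {
		fmt.Println(strings.ToLower(strings.TrimSpace(flag)))
	}

	// Classic form: allocates a []string first.
	for _, flag := range strings.Split(option, ",") {
		fmt.Println(strings.ToLower(strings.TrimSpace(flag)))
	}
}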
@@ -634,7 +634,7 @@ func (f *Fs) readItemMetaData(ctx context.Context, path string) (entry fs.DirEnt
return
}
// itemToDirEntry converts API item to rclone directory entry
// itemToEntry converts API item to rclone directory entry
// The dirSize return value is:
//
// <0 - for a file or in case of error
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
f.speedupAny = false
uniqueValidPatterns := make(map[string]any)
for pattern := range strings.SplitSeq(patternString, ",") {
for _, pattern := range strings.Split(patternString, ",") {
pattern = strings.ToLower(strings.TrimSpace(pattern))
if pattern == "" {
continue


@@ -17,11 +17,9 @@ Improvements:
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net/http"
"path"
"slices"
"strings"
@@ -218,25 +216,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
defer megaCacheMu.Unlock()
srv := megaCache[opt.User]
if srv == nil {
// srv = mega.New().SetClient(fshttp.NewClient(ctx))
// Workaround for Mega's use of insecure cipher suites which are no longer supported by default since Go 1.22.
// Relevant issues:
// https://github.com/rclone/rclone/issues/8565
// https://github.com/meganz/webclient/issues/103
clt := fshttp.NewClient(ctx)
clt.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
var ids []uint16
// Read default ciphers
for _, cs := range tls.CipherSuites() {
ids = append(ids, cs.ID)
}
// Insecure but Mega uses TLS_RSA_WITH_AES_128_GCM_SHA256 for storage endpoints
// (e.g. https://gfs302n114.userstorage.mega.co.nz) as of June 18, 2025.
t.TLSClientConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
})
srv = mega.New().SetClient(clt)
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...any) {
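
A hedged, standard-library-only sketch of the cipher-suite extension being removed above; the suite list mirrors the hunk, the client construction is illustrative:

package main

import (
	"crypto/tls"
	"net/http"
)

// legacyCipherClient builds an HTTP client that accepts Go's default
// cipher suites plus the insecure TLS_RSA_WITH_AES_128_GCM_SHA256,
// which some storage endpoints still require.
func legacyCipherClient() *http.Client {
	var ids []uint16
	for _, cs := range tls.CipherSuites() { // secure defaults only
		ids = append(ids, cs.ID)
	}
	ids = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256) // legacy extra
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{CipherSuites: ids},
		},
	}
}

func main() {
	_ = legacyCipherClient()
}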
@@ -946,9 +926,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(q.Mstrg), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Cstrg), // bytes in use
Free: fs.NewUsageValue(q.Mstrg - q.Cstrg), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(q.Cstrg)), // bytes in use
Free: fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}


@@ -325,12 +325,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
// listDir lists the bucket to the entries
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
return callback(entry)
entries = append(entries, entry)
return nil
})
return err
return entries, err
}
// listBuckets lists the buckets to entries
@@ -353,46 +354,15 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
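
The removed doc comment spells out the ListP contract; a hedged skeleton of how a backend wires List and ListP together with the fs/list helpers used in this hunk (fetch is a hypothetical stand-in for the backend's real lister):

package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/list"
)

// Fs is a minimal stand-in for a backend.
type Fs struct{}

// fetch is a hypothetical placeholder for the real listing call.
func (f *Fs) fetch(ctx context.Context, dir string) (fs.DirEntries, error) {
	return nil, nil
}

// List satisfies fs.Fs by routing through ListP.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries in tranches: Add batches them through the
// callback and Flush sends any remainder.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback)
	entries, err := f.fetch(ctx, dir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if err := helper.Add(entry); err != nil {
			return err
		}
	}
	return helper.Flush()
}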
// ListR lists the objects and directories of the Fs starting
@@ -659,7 +629,6 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)


@@ -243,6 +243,7 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
numSet = 0
for k, v := range metadata {
k, v := k, v
switch k {
case "mtime":
t, err := time.Parse(timeFormatIn, v)
@@ -421,7 +422,12 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
return true
}
return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
if hasUserIdentity(identity) {
return true
}
}
return false
}
// Put Permissions with a user first, leaving unsorted otherwise
slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
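
The two forms in the permissions hunk above are equivalent; a hedged toy comparison using the Go 1.21 slices package:

package main

import (
	"fmt"
	"slices"
)

func main() {
	xs := []int{1, 3, 4}
	isEven := func(x int) bool { return x%2 == 0 }

	// One-liner form.
	fmt.Println(slices.ContainsFunc(xs, isEven)) // true

	// Spelled-out form, as in the added lines of the hunk.
	found := false
	for _, x := range xs {
		if isEven(x) {
			found = true
			break
		}
	}
	fmt.Println(found) // true
}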
@@ -743,8 +749,6 @@ func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo,
// Fetch metadata and update updateInfo if --metadata is in use
// modtime will still be set when there is no metadata to set
//
// May return info=nil and err=nil if there was no metadata to update.
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) {
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
if err != nil {
@@ -764,8 +768,6 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
}
// updateMetadata calls Get, Set, and Write
//
// May return info=nil and err=nil if there was no metadata to update.
func (o *Object) updateMetadata(ctx context.Context, meta fs.Metadata) (info *api.Item, err error) {
_, err = o.meta.Get(ctx) // refresh permissions
if err != nil {


@@ -56,7 +56,6 @@ const (
driveTypeSharepoint = "documentLibrary"
defaultChunkSize = 10 * fs.Mebi
chunkSizeMultiple = 320 * fs.Kibi
maxSinglePartSize = 4 * fs.Mebi
regionGlobal = "global"
regionUS = "us"
@@ -139,21 +138,6 @@ func init() {
Help: "Azure and Office 365 operated by Vnet Group in China",
},
},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
This is disabled by default as uploading using single part uploads
causes rclone to use twice the storage on Onedrive business as when
rclone sets the modification time after the upload Onedrive creates a
new version.
See: https://github.com/rclone/rclone/issues/1716
`,
Default: fs.SizeSuffix(-1),
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
@@ -762,7 +746,6 @@ Examples:
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
@@ -1039,13 +1022,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxSinglePartSize {
return fmt.Errorf("%v is greater than %v", cs, maxSinglePartSize)
}
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -1059,10 +1035,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("onedrive: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, fmt.Errorf("onedrive: upload cutoff: %w", err)
}
if opt.DriveID == "" || opt.DriveType == "" {
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -1782,9 +1754,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
if err != nil {
return nil, err
}
if info != nil {
err = dstObj.setMetaData(info)
}
err = dstObj.setMetaData(info)
return dstObj, err
}
@@ -1864,9 +1834,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
if err != nil {
return nil, err
}
if info != nil {
err = dstObj.setMetaData(info)
}
err = dstObj.setMetaData(info)
return dstObj, err
}
@@ -2501,10 +2469,6 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
return false, nil
}
return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
} else if err != nil && resp != nil && resp.StatusCode == http.StatusNotFound {
fs.Debugf(o, "Received 404 error: assuming eventual consistency problem with session - retrying chunk: %v", err)
time.Sleep(5 * time.Second) // a little delay to help things along
return true, err
}
if err != nil {
return shouldRetry(ctx, resp, err)
@@ -2599,8 +2563,8 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
size := src.Size()
if size < 0 || size > int64(maxSinglePartSize) {
return nil, fmt.Errorf("size passed into uploadSinglepart must be >= 0 and <= %v", maxSinglePartSize)
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
}
fs.Debugf(o, "Starting singlepart upload")
@@ -2633,10 +2597,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.Obje
if err != nil {
return nil, fmt.Errorf("failed to fetch and update metadata: %w", err)
}
if info != nil {
err = o.setMetaData(info)
}
return info, err
return info, o.setMetaData(info)
}
// Update the object with the contents of the io.Reader, modTime and size
@@ -2656,9 +2617,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var info *api.Item
if size > 0 && size >= int64(o.fs.opt.UploadCutoff) {
if size > 0 {
info, err = o.uploadMultipart(ctx, in, src, options...)
} else if size >= 0 {
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, src, options...)
} else {
return errors.New("unknown-sized upload not supported")


@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
require.NoError(b, err)
require.Equal(b, len(buf), n)
h := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(buf)
h.Sum(nil)
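
The two benchmark loop styles in this hunk, as a hedged self-contained comparison (Go 1.24 adds b.Loop, which is meant to handle the timer reset itself; work is an illustrative function under test):

package bench

import "testing"

func work() { /* code under test (illustrative) */ }

// Go 1.24+ style: setup before the loop is excluded from timing
// without an explicit reset.
func BenchmarkLoop(b *testing.B) {
	buf := make([]byte, 1<<20)
	_ = buf // setup happens once, outside the measured region
	for b.Loop() {
		work()
	}
}

// Classic style: reset the timer manually after setup, then run b.N times.
func BenchmarkClassic(b *testing.B) {
	buf := make([]byte, 1<<20)
	_ = buf
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		work()
	}
}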


@@ -12,7 +12,6 @@ import (
"strings"
"time"
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
@@ -34,46 +33,9 @@ func init() {
NewFs: NewFs,
CommandHelp: commandHelp,
Options: newOptions(),
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored as opc-meta- keys.`,
},
})
}
var systemMetadataInfo = map[string]fs.MetadataHelp{
"opc-meta-mode": {
Help: "File type and mode",
Type: "octal, unix style",
Example: "0100664",
},
"opc-meta-uid": {
Help: "User ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-gid": {
Help: "Group ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-atime": {
Help: "Time of last access",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-mtime": {
Help: "Time of last modification",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-btime": {
Help: "Time of file birth (creation)",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
}
// Fs represents a remote object storage server
type Fs struct {
name string // name of this remote
@@ -120,7 +82,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMetadata: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
@@ -254,47 +215,15 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucketName, directory := f.split(dir)
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
if bucketName == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
}
// listFn is called from list to handle an object.
@@ -443,24 +372,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectst
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
}
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return nil
return entries, nil
}
// listBuckets returns all the buckets to out
@@ -759,45 +688,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return list.Flush()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx)
if err != nil {
return nil, err
}
metadata = make(fs.Metadata, len(o.meta)+7)
for k, v := range o.meta {
switch k {
case metaMtime:
if modTime, err := swift.FloatStringToTime(v); err == nil {
metadata["mtime"] = modTime.Format(time.RFC3339Nano)
}
case metaMD5Hash:
// don't write hash metadata
default:
metadata[k] = v
}
}
if o.mimeType != "" {
metadata["content-type"] = o.mimeType
}
if !o.lastModified.IsZero() {
metadata["btime"] = o.lastModified.Format(time.RFC3339Nano)
}
return metadata, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}


@@ -378,20 +378,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f, nil
}
// XOpenWriterAt opens with a handle for random access writes
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object.
//
// OpenWriterAt disabled because it seems to have been disabled at pcloud
// PUT /file_open?flags=XXX&folderid=XXX&name=XXX HTTP/1.1
//
// {
// "result": 2003,
// "error": "Access denied. You do not have permissions to perform this operation."
// }
func (f *Fs) XOpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
client, err := f.newSingleConnClient(ctx)
if err != nil {
return nil, fmt.Errorf("create client: %w", err)


@@ -155,7 +155,6 @@ func (f *Fs) getFile(ctx context.Context, ID string) (info *api.File, err error)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rst.CallJSON(ctx, &opts, nil, &info)
if err == nil && !info.Links.ApplicationOctetStream.Valid() {
time.Sleep(5 * time.Second)
return true, errors.New("no link")
}
return f.shouldRetry(ctx, resp, err)


@@ -1,333 +0,0 @@
package pikpak
import (
"context"
"fmt"
"io"
"sort"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/sync/errgroup"
)
const (
bufferSize = 1024 * 1024 // default size of the pages used in the reader
bufferCacheSize = 64 // max number of buffers to keep in cache
bufferCacheFlushTime = 5 * time.Second // flush the cached buffers after this long
)
// bufferPool is a global pool of buffers
var (
bufferPool *pool.Pool
bufferPoolOnce sync.Once
)
// get a buffer pool
func getPool() *pool.Pool {
bufferPoolOnce.Do(func() {
ci := fs.GetConfig(context.Background())
// Initialise the buffer pool when used
bufferPool = pool.New(bufferCacheFlushTime, bufferSize, bufferCacheSize, ci.UseMmap)
})
return bufferPool
}
// NewRW gets a pool.RW using the multipart pool
func NewRW() *pool.RW {
return pool.NewRW(getPool())
}
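// Hedged usage sketch (not part of this file): a caller borrows a
// pooled read-writer, fills it, and must Close it to return the pages,
// following the pattern in Upload below (src and chunkSize illustrative):
//
//	rw := NewRW()
//	defer func() { _ = rw.Close() }() // return buffers to the pool
//	if _, err := io.CopyN(rw, src, chunkSize); err != nil && err != io.EOF {
//		return err
//	}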
// Upload does a multipart upload in parallel
func (w *pikpakChunkWriter) Upload(ctx context.Context) (err error) {
// make concurrency machinery
tokens := pacer.NewTokenDispenser(w.con)
uploadCtx, cancel := context.WithCancel(ctx)
defer cancel()
defer atexit.OnError(&err, func() {
cancel()
fs.Debugf(w.o, "multipart upload: Cancelling...")
errCancel := w.Abort(ctx)
if errCancel != nil {
fs.Debugf(w.o, "multipart upload: failed to cancel: %v", errCancel)
}
})()
var (
g, gCtx = errgroup.WithContext(uploadCtx)
finished = false
off int64
size = w.size
chunkSize = w.chunkSize
)
// Do the accounting manually
in, acc := accounting.UnWrapAccounting(w.in)
for partNum := int64(0); !finished; partNum++ {
// Get a block of memory from the pool and token which limits concurrency.
tokens.Get()
rw := NewRW()
if acc != nil {
rw.SetAccounting(acc.AccountRead)
}
free := func() {
// return the memory and token
_ = rw.Close() // Can't return an error
tokens.Put()
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
}
// Read the chunk
var n int64
n, err = io.CopyN(rw, in, chunkSize)
if err == io.EOF {
if n == 0 && partNum != 0 { // end if no data and if not first chunk
free()
break
}
finished = true
} else if err != nil {
free()
return fmt.Errorf("multipart upload: failed to read source: %w", err)
}
partNum := partNum
partOff := off
off += n
g.Go(func() (err error) {
defer free()
fs.Debugf(w.o, "multipart upload: starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(partOff), fs.SizeSuffix(size))
_, err = w.WriteChunk(gCtx, int32(partNum), rw)
return err
})
}
err = g.Wait()
if err != nil {
return err
}
err = w.Close(ctx)
if err != nil {
return fmt.Errorf("multipart upload: failed to finalise: %w", err)
}
return nil
}
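// Hedged sketch of the overall flow these pieces add up to, mirroring
// the uploadByResumable change later in this comparison (params and in
// are illustrative):
//
//	w, err := f.newChunkWriter(ctx, remote, size, params, in, options...)
//	if err != nil {
//		return fmt.Errorf("multipart upload failed to initialise: %w", err)
//	}
//	return w.Upload(ctx) // uploads chunks concurrently; Close/Abort inside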
var warnStreamUpload sync.Once
// state of ChunkWriter
type pikpakChunkWriter struct {
chunkSize int64
size int64
con int
f *Fs
o *Object
in io.Reader
mu sync.Mutex
completedParts []types.CompletedPart
client *s3.Client
mOut *s3.CreateMultipartUploadOutput
}
func (f *Fs) newChunkWriter(ctx context.Context, remote string, size int64, p *api.ResumableParams, in io.Reader, options ...fs.OpenOption) (w *pikpakChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
// calculate size of parts
chunkSize := f.opt.ChunkSize
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
// 48 GiB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(maxUploadParts)))
})
} else {
chunkSize = chunksize.Calculator(o, size, maxUploadParts, chunkSize)
}
client, err := f.newS3Client(ctx, p)
if err != nil {
return nil, fmt.Errorf("failed to create upload client: %w", err)
}
w = &pikpakChunkWriter{
chunkSize: int64(chunkSize),
size: size,
con: max(1, f.opt.UploadConcurrency),
f: f,
o: o,
in: in,
completedParts: make([]types.CompletedPart, 0),
client: client,
}
req := &s3.CreateMultipartUploadInput{
Bucket: &p.Bucket,
Key: &p.Key,
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
req.CacheControl = aws.String(value)
case "content-disposition":
req.ContentDisposition = aws.String(value)
case "content-encoding":
req.ContentEncoding = aws.String(value)
case "content-type":
req.ContentType = aws.String(value)
}
}
err = w.f.pacer.Call(func() (bool, error) {
w.mOut, err = w.client.CreateMultipartUpload(ctx, req)
return w.shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("create multipart upload failed: %w", err)
}
fs.Debugf(w.o, "multipart upload: %q initiated", *w.mOut.UploadId)
return
}
// shouldRetry returns a boolean as to whether this err
// deserve to be retried. It returns the err as a convenience
func (w *pikpakChunkWriter) shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if fserrors.ShouldRetry(err) {
return true, err
}
return false, err
}
// add a part number and etag to the completed parts
func (w *pikpakChunkWriter) addCompletedPart(part types.CompletedPart) {
w.mu.Lock()
defer w.mu.Unlock()
w.completedParts = append(w.completedParts, part)
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *pikpakChunkWriter) WriteChunk(ctx context.Context, chunkNumber int32, reader io.ReadSeeker) (currentChunkSize int64, err error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
partNumber := chunkNumber + 1
var res *s3.UploadPartOutput
err = w.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
currentChunkSize, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading md5
_, err := reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
res, err = w.client.UploadPart(ctx, &s3.UploadPartInput{
Bucket: w.mOut.Bucket,
Key: w.mOut.Key,
UploadId: w.mOut.UploadId,
PartNumber: &partNumber,
Body: reader,
})
if err != nil {
if chunkNumber <= 8 {
return w.shouldRetry(ctx, err)
}
// retry all chunks once have done the first few
return true, err
}
return false, nil
})
if err != nil {
return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", partNumber, currentChunkSize, err)
}
w.addCompletedPart(types.CompletedPart{
PartNumber: &partNumber,
ETag: res.ETag,
})
fs.Debugf(w.o, "multipart upload: wrote chunk %d with %v bytes", partNumber, currentChunkSize)
return currentChunkSize, err
}
// Abort the multipart upload
func (w *pikpakChunkWriter) Abort(ctx context.Context) (err error) {
// Abort the upload session
err = w.f.pacer.Call(func() (bool, error) {
_, err = w.client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
Bucket: w.mOut.Bucket,
Key: w.mOut.Key,
UploadId: w.mOut.UploadId,
})
return w.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to abort multipart upload %q: %w", *w.mOut.UploadId, err)
}
fs.Debugf(w.o, "multipart upload: %q aborted", *w.mOut.UploadId)
return
}
// Close and finalise the multipart upload
func (w *pikpakChunkWriter) Close(ctx context.Context) (err error) {
// sort the completed parts by part number
sort.Slice(w.completedParts, func(i, j int) bool {
return *w.completedParts[i].PartNumber < *w.completedParts[j].PartNumber
})
// Finalise the upload session
err = w.f.pacer.Call(func() (bool, error) {
_, err = w.client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
Bucket: w.mOut.Bucket,
Key: w.mOut.Key,
UploadId: w.mOut.UploadId,
MultipartUpload: &types.CompletedMultipartUpload{
Parts: w.completedParts,
},
})
return w.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to complete multipart upload: %w", err)
}
fs.Debugf(w.o, "multipart upload: %q finished", *w.mOut.UploadId)
return
}


@@ -41,10 +41,12 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/rclone/rclone/backend/pikpak/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
@@ -64,22 +66,17 @@ import (
// Constants
const (
clientID = "YUMx5nI8ZU8Ap8pm"
clientVersion = "2.0.0"
packageName = "mypikpak.com"
defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
taskWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api-drive.mypikpak.com"
maxUploadParts = 10000 // Part number must be an integer between 1 and 10000, inclusive.
defaultChunkSize = fs.SizeSuffix(1024 * 1024 * 5) // Part size should be in [100KB, 5GB]
minChunkSize = 100 * fs.Kibi
maxChunkSize = 5 * fs.Gibi
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = 5 * fs.Gibi // maximum allowed size for singlepart uploads
clientID = "YUMx5nI8ZU8Ap8pm"
clientVersion = "2.0.0"
packageName = "mypikpak.com"
defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
taskWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
rootURL = "https://api-drive.mypikpak.com"
minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize)
defaultUploadConcurrency = manager.DefaultUploadConcurrency
)
// Globals
@@ -226,14 +223,6 @@ Fill in for rclone to use a non root folder as its starting point.
Help: "Files bigger than this will be cached on disk to calculate hash if required.",
Default: fs.SizeSuffix(10 * 1024 * 1024),
Advanced: true,
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size for multipart uploads.
@@ -252,7 +241,7 @@ large file of known size to stay below the 10,000 chunks limit.
Increasing the chunk size decreases the accuracy of the progress
statistics displayed with "-P" flag.`,
Default: defaultChunkSize,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
@@ -268,7 +257,7 @@ in memory.
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 4,
Default: defaultUploadConcurrency,
Advanced: true,
}, {
Name: config.ConfigEncoding,
@@ -305,7 +294,6 @@ type Options struct {
NoMediaLink bool `config:"no_media_link"`
HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
UploadConcurrency int `config:"upload_concurrency"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -479,11 +467,6 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
// when a zero-byte file was uploaded with an invalid captcha token
f.rst.captcha.Invalidate()
return true, err
} else if strings.Contains(apiErr.Reason, "idx.shub.mypikpak.com") && apiErr.Code == 500 {
// internal server error: Post "http://idx.shub.mypikpak.com": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
// This typically happens when trying to retrieve a gcid for which no record exists.
// No retry is needed in this case.
return false, err
}
}
@@ -541,39 +524,6 @@ func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
return nil
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// newFs partially constructs Fs from the path
//
// It constructs a valid Fs but doesn't attempt to figure out whether
@@ -581,17 +531,11 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
if err := configstruct.Set(m, opt); err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, fmt.Errorf("pikpak: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, fmt.Errorf("pikpak: upload cutoff: %w", err)
if opt.ChunkSize < minChunkSize {
return nil, fmt.Errorf("chunk size must be at least %s", minChunkSize)
}
root := parsePath(path)
@@ -979,24 +923,6 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
return nil
}
// untrash a file or directory by ID
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the restored item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) untrashObjects(ctx context.Context, IDs []string) (err error) {
if len(IDs) == 0 {
return nil
}
req := api.RequestBatch{
IDs: IDs,
}
if err := f.requestBatchAction(ctx, "batchUntrash", &req); err != nil {
return fmt.Errorf("untrash object failed: %w", err)
}
return nil
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
@@ -1081,14 +1007,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return f.waitTask(ctx, info.TaskID)
}
// Move the object to a new parent folder
//
// Objects cannot be moved to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the moved item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
// Move the object
func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
if len(IDs) == 0 {
return nil
@@ -1104,12 +1023,6 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
}
// renames the object
//
// The new name must be different from the current name.
// "file_rename_to_same_name" (3): Name of file or folder is not changed
//
// Within the same folder, object names must be unique.
// "file_duplicated_name" (3): File name cannot be repeated
func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
req := api.File{
Name: f.opt.Enc.FromStandardName(newName),
@@ -1194,13 +1107,18 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}
@@ -1211,74 +1129,31 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
return nil, err
}
if srcObj.parent != dstParentID {
// Perform the move. A numbered copy might be generated upon name collision.
if srcParentID != dstParentID {
// Do the move
if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
return nil, fmt.Errorf("move: failed to move object %s to new parent %s: %w", srcObj.id, dstParentID, err)
return nil, err
}
defer func() {
if err != nil {
// FIXME: Restored file might have a numbered name if a conflict occurs
if mvErr := f.moveObjects(ctx, []string{srcObj.id}, srcObj.parent); mvErr != nil {
fs.Logf(f, "move: couldn't restore original object %q to %q after move failure: %v", dstObj.id, src.Remote(), mvErr)
}
}
}()
}
// Manually update info of moved object to save API calls
dstObj.id = srcObj.id
dstObj.mimeType = srcObj.mimeType
dstObj.gcid = srcObj.gcid
dstObj.md5sum = srcObj.md5sum
dstObj.hasMetaData = true
// Find the moved object and any conflict object with the same name.
var moved, conflict *api.File
_, err = f.listAll(ctx, dstParentID, api.KindOfFile, "false", func(item *api.File) bool {
if item.ID == srcObj.id {
moved = item
if item.Name == dstLeaf {
return true
}
} else if item.Name == dstLeaf {
conflict = item
if srcLeaf != dstLeaf {
// Rename
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
}
// Stop early if both found
return moved != nil && conflict != nil
})
if err != nil {
return nil, fmt.Errorf("move: couldn't locate moved file %q in destination directory %q: %w", srcObj.id, dstParentID, err)
return dstObj, dstObj.setMetaData(info)
}
if moved == nil {
return nil, fmt.Errorf("move: moved file %q not found in destination", srcObj.id)
}
// If moved object already has the correct name, return
if moved.Name == dstLeaf {
return dstObj, dstObj.setMetaData(moved)
}
// If name collision, delete conflicting file first
if conflict != nil {
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
return nil, fmt.Errorf("move: couldn't delete conflicting file: %w", err)
}
defer func() {
if err != nil {
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
fs.Logf(f, "move: couldn't restore conflicting file: %v", restoreErr)
}
}
}()
}
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("move: couldn't rename moved file %q to %q: %w", dstObj.id, dstLeaf, err)
}
return dstObj, dstObj.setMetaData(info)
return dstObj, nil
}
// copy objects
//
// Objects cannot be copied to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the copied item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
if len(IDs) == 0 {
return nil
@@ -1302,13 +1177,13 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
@@ -1323,55 +1198,31 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
fs.Debugf(src, "Can't copy - same parent")
return nil, fs.ErrorCantCopy
}
// Check for possible conflicts: Pikpak creates numbered copies on name collision.
var conflict *api.File
_, srcLeaf := dircache.SplitPath(srcObj.remote)
if srcLeaf == dstLeaf {
if conflict, err = f.readMetaDataForPath(ctx, remote); err == nil {
// delete conflicting file
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
return nil, fmt.Errorf("copy: couldn't delete conflicting file: %w", err)
}
defer func() {
if err != nil {
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
fs.Logf(f, "copy: couldn't restore conflicting file: %v", restoreErr)
}
}
}()
} else if err != fs.ErrorObjectNotFound {
return nil, err
}
} else {
dstDir, _ := dircache.SplitPath(remote)
dstObj.remote = path.Join(dstDir, srcLeaf)
if conflict, err = f.readMetaDataForPath(ctx, dstObj.remote); err == nil {
tmpName := conflict.Name + "-rclone-copy-" + random.String(8)
if _, err = f.renameObject(ctx, conflict.ID, tmpName); err != nil {
return nil, fmt.Errorf("copy: couldn't rename conflicting file: %w", err)
}
defer func() {
if _, renameErr := f.renameObject(ctx, conflict.ID, conflict.Name); renameErr != nil {
fs.Logf(f, "copy: couldn't rename conflicting file back to original: %v", renameErr)
}
}()
} else if err != fs.ErrorObjectNotFound {
return nil, err
}
}
// Copy the object
if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
err = dstObj.readMetaData(ctx)
if err != nil {
// Update info of the copied object with new parent but source name
if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
} else if err = dstObj.setMetaData(info); err != nil {
return nil, err
}
// Can't copy and change name in one step so we have to check if we have
// the correct name after copy
srcLeaf, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}
if srcLeaf != dstLeaf {
return f.Move(ctx, dstObj, remote)
// Rename
info, err := f.renameObject(ctx, dstObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
}
return dstObj, dstObj.setMetaData(info)
}
return dstObj, nil
}
@@ -1409,7 +1260,9 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
return
}
func (f *Fs) newS3Client(ctx context.Context, p *api.ResumableParams) (s3Client *s3.Client, err error) {
func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, name string, size int64, resumable *api.Resumable) (err error) {
p := resumable.Params
// Create a credentials provider
creds := credentials.NewStaticCredentialsProvider(p.AccessKeyID, p.AccessKeySecret, p.SecurityToken)
@@ -1419,64 +1272,22 @@ func (f *Fs) newS3Client(ctx context.Context, p *api.ResumableParams) (s3Client
if err != nil {
return
}
ci := fs.GetConfig(ctx)
cfg.RetryMaxAttempts = ci.LowLevelRetries
cfg.HTTPClient = getClient(ctx, &f.opt)
client := s3.NewFromConfig(cfg, func(o *s3.Options) {
o.BaseEndpoint = aws.String("https://mypikpak.com/")
o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
o.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired
})
return client, nil
}
partSize := chunksize.Calculator(name, size, int(manager.MaxUploadParts), f.opt.ChunkSize)
func (f *Fs) uploadByResumable(ctx context.Context, in io.Reader, name string, size int64, resumable *api.Resumable, options ...fs.OpenOption) (err error) {
p := resumable.Params
if size < 0 || size >= int64(f.opt.UploadCutoff) {
mu, err := f.newChunkWriter(ctx, name, size, p, in, options...)
if err != nil {
return fmt.Errorf("multipart upload failed to initialise: %w", err)
}
return mu.Upload(ctx)
}
// upload singlepart
client, err := f.newS3Client(ctx, p)
if err != nil {
return fmt.Errorf("failed to create upload client: %w", err)
}
req := &s3.PutObjectInput{
// Create an uploader with custom options
uploader := manager.NewUploader(client, func(u *manager.Uploader) {
u.PartSize = int64(partSize)
u.Concurrency = f.opt.UploadConcurrency
})
// Perform an upload
_, err = uploader.Upload(ctx, &s3.PutObjectInput{
Bucket: &p.Bucket,
Key: &p.Key,
Body: io.NopCloser(in),
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
req.CacheControl = aws.String(value)
case "content-disposition":
req.ContentDisposition = aws.String(value)
case "content-encoding":
req.ContentEncoding = aws.String(value)
case "content-type":
req.ContentType = aws.String(value)
}
}
var s3opts = []func(*s3.Options){}
// Can't retry single part uploads as only have an io.Reader
s3opts = append(s3opts, func(o *s3.Options) {
o.RetryMaxAttempts = 1
})
err = f.pacer.CallNoRetry(func() (bool, error) {
_, err = client.PutObject(ctx, req, s3opts...)
return f.shouldRetry(ctx, nil, err)
Body: in,
})
return
}
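
A hedged, minimal sketch of the high-level aws-sdk-go-v2 upload path the hunk above switches to; the bucket, key, and file name are illustrative:

package main

import (
	"context"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := awsconfig.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	client := s3.NewFromConfig(cfg)

	// The manager splits the stream into parts and uploads them
	// concurrently; these are the same two knobs the hunk tunes.
	uploader := manager.NewUploader(client, func(u *manager.Uploader) {
		u.PartSize = 8 << 20 // 8 MiB parts (illustrative)
		u.Concurrency = 4
	})

	f, err := os.Open("file.bin") // illustrative source
	if err != nil {
		panic(err)
	}
	defer f.Close()

	_, err = uploader.Upload(ctx, &s3.PutObjectInput{
		Bucket: aws.String("bucket"), // illustrative
		Key:    aws.String("key"),
		Body:   f,
	})
	if err != nil {
		panic(err)
	}
}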
@@ -1508,30 +1319,8 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
}
if new.File == nil {
return nil, fmt.Errorf("invalid response: %+v", new)
}
defer atexit.OnError(&err, func() {
fs.Debugf(leaf, "canceling upload: %v", err)
if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
if new.Task != nil {
if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
time.Sleep(taskWaitTime)
}
})()
// Note: The API might automatically append a numbered suffix to the filename,
// even if a file with the same name does not exist in the target directory.
if upName := f.opt.Enc.ToStandardName(new.File.Name); leaf != upName {
return nil, fserrors.NoRetryError(fmt.Errorf("uploaded file name mismatch: expected %q, got %q", leaf, upName))
}
// early return; in case of zero-byte objects or uploaded by matched gcid
if new.File.Phase == api.PhaseTypeComplete {
} else if new.File.Phase == api.PhaseTypeComplete {
// early return; in case of zero-byte objects
if acc, ok := in.(*accounting.Account); ok && acc != nil {
// if `in io.Reader` is still in type of `*accounting.Account` (meaning that it is unused)
// it is considered as a server side copy as no incoming/outgoing traffic occur at all
@@ -1541,10 +1330,22 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
return new.File, nil
}
defer atexit.OnError(&err, func() {
fs.Debugf(leaf, "canceling upload: %v", err)
if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
time.Sleep(taskWaitTime)
})()
if uploadType == api.UploadTypeForm && new.Form != nil {
err = f.uploadByForm(ctx, in, req.Name, size, new.Form, options...)
} else if uploadType == api.UploadTypeResumable && new.Resumable != nil {
err = f.uploadByResumable(ctx, in, leaf, size, new.Resumable, options...)
err = f.uploadByResumable(ctx, in, leaf, size, new.Resumable)
} else {
err = fmt.Errorf("no method available for uploading: %+v", new)
}
@@ -1552,9 +1353,6 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
if err != nil {
return nil, fmt.Errorf("failed to upload: %w", err)
}
if new.Task == nil {
return new.File, nil
}
return new.File, f.waitTask(ctx, new.Task.ID)
}


@@ -1,10 +1,10 @@
// Test PikPak filesystem interface
package pikpak
package pikpak_test
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/backend/pikpak"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -12,23 +12,6 @@ import (
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestPikPak:",
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
MaxChunkSize: maxChunkSize,
},
NilObject: (*pikpak.Object)(nil),
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetUploadCutoffer = (*Fs)(nil)
)


@@ -793,7 +793,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, err
}
usage = &fs.Usage{
Used: fs.NewUsageValue(info.SpaceUsed),
Used: fs.NewUsageValue(int64(info.SpaceUsed)),
}
return usage, nil
}


@@ -59,7 +59,11 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))
effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize
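
Both versions in this hunk compute the same quantity; a hedged one-function restatement using the Go 1.21 min/max builtins:

package mem

// effectiveChunk restates the hunk: cap the speed-based estimate at the
// needed memory, but never go below the reserved floor, i.e.
// effective = min(needed, max(int64(speed*seconds), reserved)).
func effectiveChunk(needed, reserved int64, speed, seconds float64) int64 {
	return min(needed, max(int64(speed*seconds), reserved))
}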


@@ -101,12 +101,6 @@ var providerOption = fs.Option{
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
}, {
Value: "Exaba",
Help: "Exaba Object Storage",
}, {
Value: "FlashBlade",
Help: "Pure Storage FlashBlade Object Storage",
}, {
Value: "GCS",
Help: "Google Cloud Storage",
@@ -119,9 +113,6 @@ var providerOption = fs.Option{
}, {
Value: "IDrive",
Help: "IDrive e2",
}, {
Value: "Intercolo",
Help: "Intercolo Object Storage",
}, {
Value: "IONOS",
Help: "IONOS Cloud",
@@ -140,9 +131,6 @@ var providerOption = fs.Option{
}, {
Value: "Magalu",
Help: "Magalu Object Storage",
}, {
Value: "Mega",
Help: "MEGA S4 Object Storage",
}, {
Value: "Minio",
Help: "Minio Object Storage",
@@ -152,9 +140,6 @@ var providerOption = fs.Option{
}, {
Value: "Outscale",
Help: "OUTSCALE Object Storage (OOS)",
}, {
Value: "OVHcloud",
Help: "OVHcloud Object Storage",
}, {
Value: "Petabox",
Help: "Petabox Object Storage",
@@ -173,9 +158,6 @@ var providerOption = fs.Option{
}, {
Value: "Selectel",
Help: "Selectel Object Storage",
}, {
Value: "SpectraLogic",
Help: "Spectra Logic Black Pearl",
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
@@ -194,9 +176,6 @@ var providerOption = fs.Option{
}, {
Value: "Qiniu",
Help: "Qiniu Object Storage (Kodo)",
}, {
Value: "Zata",
Help: "Zata (S3 compatible Gateway)",
}, {
Value: "Other",
Help: "Any other S3 compatible provider",
@@ -502,22 +481,6 @@ func init() {
Value: "ap-northeast-1",
Help: "Northeast Asia Region 1.\nNeeds location constraint ap-northeast-1.",
}},
}, {
Name: "region",
Help: "Region where you can connect with.\n",
Provider: "Zata",
Examples: []fs.OptionExample{{
Value: "us-east-1",
Help: "Indore, Madhya Pradesh, India",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "Intercolo",
Examples: []fs.OptionExample{{
Value: "de-fra",
Help: "Frankfurt, Germany",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
@@ -552,59 +515,6 @@ func init() {
Value: "ap-northeast-1",
Help: "Tokyo, Japan",
}},
}, {
// References:
// https://help.ovhcloud.com/csm/en-public-cloud-storage-s3-location?id=kb_article_view&sysparm_article=KB0047384
// https://support.us.ovhcloud.com/hc/en-us/articles/10667991081107-Endpoints-and-Object-Storage-Geoavailability
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "OVHcloud",
Examples: []fs.OptionExample{{
Value: "gra",
Help: "Gravelines, France",
}, {
Value: "rbx",
Help: "Roubaix, France",
}, {
Value: "sbg",
Help: "Strasbourg, France",
}, {
Value: "eu-west-par",
Help: "Paris, France (3AZ)",
}, {
Value: "de",
Help: "Frankfurt, Germany",
}, {
Value: "uk",
Help: "London, United Kingdom",
}, {
Value: "waw",
Help: "Warsaw, Poland",
}, {
Value: "bhs",
Help: "Beauharnois, Canada",
}, {
Value: "ca-east-tor",
Help: "Toronto, Canada",
}, {
Value: "sgp",
Help: "Singapore",
}, {
Value: "ap-southeast-syd",
Help: "Sydney, Australia",
}, {
Value: "ap-south-mum",
Help: "Mumbai, India",
}, {
Value: "us-east-va",
Help: "Vint Hill, Virginia, USA",
}, {
Value: "us-west-or",
Help: "Hillsboro, Oregon, USA",
}, {
Value: "rbx-archive",
Help: "Roubaix, France (Cold Archive)",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
@@ -657,7 +567,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,Intercolo,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,SpectraLogic,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -968,14 +878,6 @@ func init() {
Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
Help: "Singapore Single Site Private Endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Intercolo Object Storage.",
Provider: "Intercolo",
Examples: []fs.OptionExample{{
Value: "de-fra.i3storage.com",
Help: "Frankfurt, Germany",
}},
}, {
Name: "endpoint",
Help: "Endpoint for IONOS S3 Object Storage.\n\nSpecify the endpoint from the same region.",
@@ -1101,12 +1003,6 @@ func init() {
Value: "us-iad-1.linodeobjects.com",
Help: "Washington, DC, (USA), us-iad-1",
}},
}, {
// Lyve Cloud endpoints
Name: "endpoint",
Help: "Endpoint for Lyve Cloud S3 API.\nRequired when using an S3 clone. Please type in your LyveCloud endpoint.\nExamples:\n- s3.us-west-1.{account_name}.lyve.seagate.com (US West 1 - California)\n- s3.eu-west-1.{account_name}.lyve.seagate.com (EU West 1 - Ireland)",
Provider: "LyveCloud",
Required: true,
}, {
// Magalu endpoints: https://docs.magalu.cloud/docs/object-storage/how-to/copy-url
Name: "endpoint",
@@ -1252,71 +1148,6 @@ func init() {
Value: "obs.ru-northwest-2.myhuaweicloud.com",
Help: "RU-Moscow2",
}},
}, {
Name: "endpoint",
Help: "Endpoint for OVHcloud Object Storage.",
Provider: "OVHcloud",
Examples: []fs.OptionExample{{
Value: "s3.gra.io.cloud.ovh.net",
Help: "OVHcloud Gravelines, France",
Provider: "OVHcloud",
}, {
Value: "s3.rbx.io.cloud.ovh.net",
Help: "OVHcloud Roubaix, France",
Provider: "OVHcloud",
}, {
Value: "s3.sbg.io.cloud.ovh.net",
Help: "OVHcloud Strasbourg, France",
Provider: "OVHcloud",
}, {
Value: "s3.eu-west-par.io.cloud.ovh.net",
Help: "OVHcloud Paris, France (3AZ)",
Provider: "OVHcloud",
}, {
Value: "s3.de.io.cloud.ovh.net",
Help: "OVHcloud Frankfurt, Germany",
Provider: "OVHcloud",
}, {
Value: "s3.uk.io.cloud.ovh.net",
Help: "OVHcloud London, United Kingdom",
Provider: "OVHcloud",
}, {
Value: "s3.waw.io.cloud.ovh.net",
Help: "OVHcloud Warsaw, Poland",
Provider: "OVHcloud",
}, {
Value: "s3.bhs.io.cloud.ovh.net",
Help: "OVHcloud Beauharnois, Canada",
Provider: "OVHcloud",
}, {
Value: "s3.ca-east-tor.io.cloud.ovh.net",
Help: "OVHcloud Toronto, Canada",
Provider: "OVHcloud",
}, {
Value: "s3.sgp.io.cloud.ovh.net",
Help: "OVHcloud Singapore",
Provider: "OVHcloud",
}, {
Value: "s3.ap-southeast-syd.io.cloud.ovh.net",
Help: "OVHcloud Sydney, Australia",
Provider: "OVHcloud",
}, {
Value: "s3.ap-south-mum.io.cloud.ovh.net",
Help: "OVHcloud Mumbai, India",
Provider: "OVHcloud",
}, {
Value: "s3.us-east-va.io.cloud.ovh.us",
Help: "OVHcloud Vint Hill, Virginia, USA",
Provider: "OVHcloud",
}, {
Value: "s3.us-west-or.io.cloud.ovh.us",
Help: "OVHcloud Hillsboro, Oregon, USA",
Provider: "OVHcloud",
}, {
Value: "s3.rbx-archive.io.cloud.ovh.net",
Help: "OVHcloud Roubaix, France (Cold Archive)",
Provider: "OVHcloud",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Scaleway Object Storage.",
@@ -1534,14 +1365,6 @@ func init() {
Value: "s3-ap-northeast-1.qiniucs.com",
Help: "Northeast Asia Endpoint 1",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Zata Object Storage.",
Provider: "Zata",
Examples: []fs.OptionExample{{
Value: "idr01.zata.ai",
Help: "South Asia Endpoint",
}},
}, {
// Selectel endpoints: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
Name: "endpoint",
@@ -1554,7 +1377,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,Intercolo,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -1603,6 +1426,18 @@ func init() {
Value: "localhost:8333",
Help: "SeaweedFS S3 localhost",
Provider: "SeaweedFS",
}, {
Value: "s3.us-east-1.lyvecloud.seagate.com",
Help: "Seagate Lyve Cloud US East 1 (Virginia)",
Provider: "LyveCloud",
}, {
Value: "s3.us-west-1.lyvecloud.seagate.com",
Help: "Seagate Lyve Cloud US West 1 (California)",
Provider: "LyveCloud",
}, {
Value: "s3.ap-southeast-1.lyvecloud.seagate.com",
Help: "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
Provider: "LyveCloud",
}, {
Value: "oos.eu-west-2.outscale.com",
Help: "Outscale EU West 2 (Paris)",
@@ -1691,22 +1526,6 @@ func init() {
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
Provider: "ArvanCloud",
}, {
Value: "s3.eu-central-1.s4.mega.io",
Help: "Mega S4 eu-central-1 (Amsterdam)",
Provider: "Mega",
}, {
Value: "s3.eu-central-2.s4.mega.io",
Help: "Mega S4 eu-central-2 (Bettembourg)",
Provider: "Mega",
}, {
Value: "s3.ca-central-1.s4.mega.io",
Help: "Mega S4 ca-central-1 (Montreal)",
Provider: "Mega",
}, {
Value: "s3.ca-west-1.s4.mega.io",
Help: "Mega S4 ca-west-1 (Vancouver)",
Provider: "Mega",
}},
}, {
Name: "location_constraint",
@@ -2089,7 +1908,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,Intercolo,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,SpectraLogic,StackPath,Storj,TencentCOS,Petabox,Mega",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -2104,7 +1923,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
Provider: "!Storj,Selectel,SpectraLogic,Synology,Cloudflare,FlashBlade,Mega",
Provider: "!Storj,Selectel,Synology,Cloudflare",
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -2162,7 +1981,6 @@ isn't set then "acl" is used instead.
If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
header is added and the default (private) will be used.
`,
Provider: "!Storj,Selectel,SpectraLogic,Synology,Cloudflare,FlashBlade",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
@@ -2571,11 +2389,6 @@ See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/lates
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
Default: false,
Advanced: true,
}, {
Name: "use_arn_region",
Help: `If true, enables arn region support for the service.`,
Default: false,
Advanced: true,
}, {
Name: "leave_parts_on_error",
Provider: "AWS",
@@ -2783,7 +2596,7 @@ The parameter should be a date, "2006-01-02", datetime "2006-01-02
Note that when using this no file write operations are permitted,
so you can't upload files or delete them.
See [the time option docs](/docs/#time-options) for valid formats.
See [the time option docs](/docs/#time-option) for valid formats.
`,
Default: fs.Time{},
Advanced: true,
@@ -3123,7 +2936,6 @@ type Options struct {
ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"`
UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"`
UseARNRegion bool `config:"use_arn_region"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
ListChunk int32 `config:"list_chunk"`
ListVersion int `config:"list_version"`
@@ -3304,9 +3116,6 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
bucketPath = bucketPath[:len(bucketPath)-1]
}
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -3488,7 +3297,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
options = append(options, func(s3Opt *s3.Options) {
s3Opt.UsePathStyle = opt.ForcePathStyle
s3Opt.UseAccelerate = opt.UseAccelerateEndpoint
s3Opt.UseARNRegion = opt.UseARNRegion
// FIXME maybe this should be a tristate so can default to DualStackEndpointStateUnset?
if opt.UseDualStack {
s3Opt.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateEnabled
@@ -3687,9 +3495,6 @@ func setQuirks(opt *Options) {
case "Dreamhost":
urlEncodeListings = false
useAlreadyExists = false // untested
case "FlashBlade":
mightGzip = false // Never auto gzips objects
virtualHostStyle = false // supports vhost but defaults to paths
case "IBMCOS":
listObjectsV2 = false // untested
virtualHostStyle = false
@@ -3699,9 +3504,6 @@ func setQuirks(opt *Options) {
case "IDrive":
virtualHostStyle = false
useAlreadyExists = false // untested
case "Intercolo":
// no quirks
useUnsignedPayload = false // Intercolo has trailer support
case "IONOS":
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false
@@ -3725,14 +3527,6 @@ func setQuirks(opt *Options) {
urlEncodeListings = false
useMultipartEtag = false
useAlreadyExists = false
case "Mega":
listObjectsV2 = true
virtualHostStyle = false
urlEncodeListings = true
useMultipartEtag = false
useAlreadyExists = false
// Multipart server side copies not supported
opt.CopyCutoff = math.MaxInt64
case "Minio":
virtualHostStyle = false
case "Netease":
@@ -3742,8 +3536,6 @@ func setQuirks(opt *Options) {
useAlreadyExists = false // untested
case "Outscale":
virtualHostStyle = false
case "OVHcloud":
// No quirks
case "RackCorp":
// No quirks
useMultipartEtag = false // untested
@@ -3774,8 +3566,6 @@ func setQuirks(opt *Options) {
urlEncodeListings = false
useMultipartEtag = false // untested
useAlreadyExists = false // untested
case "SpectraLogic":
virtualHostStyle = false // path-style required
case "StackPath":
listObjectsV2 = false // untested
virtualHostStyle = false
@@ -3807,13 +3597,6 @@ func setQuirks(opt *Options) {
urlEncodeListings = false
virtualHostStyle = false
useAlreadyExists = false // untested
case "Zata":
useMultipartEtag = false
mightGzip = false
useUnsignedPayload = false
useAlreadyExists = false
case "Exaba":
virtualHostStyle = false
case "GCS":
// Google break request Signature by mutating accept-encoding HTTP header
// https://github.com/rclone/rclone/issues/6670
@@ -4615,7 +4398,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
foundItems += len(resp.Contents)
for i, object := range resp.Contents {
remote := *stringClone(deref(object.Key))
remote := deref(object.Key)
if urlEncodeListings {
remote, err = url.QueryUnescape(remote)
if err != nil {
@@ -4642,7 +4425,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
remote = remote[len(opt.prefix):]
if isDirectory {
// process directory markers as directories
remote, _ = strings.CutSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
}
if opt.addBucket {
remote = bucket.Join(opt.bucket, remote)
@@ -4957,7 +4740,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
remote, _ = strings.CutSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
@@ -5218,11 +5001,8 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
MultipartUpload: &types.CompletedMultipartUpload{
Parts: parts,
},
RequestPayer: req.RequestPayer,
SSECustomerAlgorithm: req.SSECustomerAlgorithm,
SSECustomerKey: req.SSECustomerKey,
SSECustomerKeyMD5: req.SSECustomerKeyMD5,
UploadId: uid,
RequestPayer: req.RequestPayer,
UploadId: uid,
})
return f.shouldRetry(ctx, err)
})
@@ -6071,7 +5851,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
func s3MetadataToMap(s3Meta map[string]string) map[string]string {
meta := make(map[string]string, len(s3Meta))
for k, v := range s3Meta {
meta[strings.ToLower(k)] = *stringClone(v)
meta[strings.ToLower(k)] = v
}
return meta
}
@@ -6114,14 +5894,14 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
o.lastModified = *resp.LastModified
}
}
o.mimeType = strings.Clone(deref(resp.ContentType))
o.mimeType = deref(resp.ContentType)
// Set system metadata
o.storageClass = stringClone(string(resp.StorageClass))
o.cacheControl = stringClonePointer(resp.CacheControl)
o.contentDisposition = stringClonePointer(resp.ContentDisposition)
o.contentEncoding = stringClonePointer(removeAWSChunked(resp.ContentEncoding))
o.contentLanguage = stringClonePointer(resp.ContentLanguage)
o.storageClass = (*string)(&resp.StorageClass)
o.cacheControl = resp.CacheControl
o.contentDisposition = resp.ContentDisposition
o.contentEncoding = resp.ContentEncoding
o.contentLanguage = resp.ContentLanguage
// If decompressing then size and md5sum are unknown
if o.fs.opt.Decompress && deref(o.contentEncoding) == "gzip" {
@@ -6188,36 +5968,6 @@ func (o *Object) Storable() bool {
return true
}
// removeAWSChunked removes the "aws-chunked" content-coding from a
// Content-Encoding field value (RFC 9110). Comparison is case-insensitive.
// Returns nil if encoding is empty after removal.
func removeAWSChunked(pv *string) *string {
if pv == nil {
return nil
}
v := *pv
if v == "" {
return nil
}
if !strings.Contains(strings.ToLower(v), "aws-chunked") {
return pv
}
parts := strings.Split(v, ",")
out := make([]string, 0, len(parts))
for _, p := range parts {
tok := strings.TrimSpace(p)
if tok == "" || strings.EqualFold(tok, "aws-chunked") {
continue
}
out = append(out, tok)
}
if len(out) == 0 {
return nil
}
v = strings.Join(out, ",")
return &v
}
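// Illustrative only (not part of this change): how removeAWSChunked behaves
// on typical Content-Encoding values.
//
//	removeAWSChunked(aws.String("aws-chunked, gzip")) // -> "gzip"
//	removeAWSChunked(aws.String("gzip, br"))          // -> "gzip, br" (unchanged)
//	removeAWSChunked(aws.String("aws-chunked"))       // -> nil (header dropped)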
func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
url := o.fs.opt.DownloadURL + bucketPath
var resp *http.Response
@@ -6247,8 +5997,8 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
metaData := make(map[string]string)
for key, value := range resp.Header {
key = strings.ToLower(key)
if after, ok := strings.CutPrefix(key, "x-amz-meta-"); ok {
metaKey := after
if strings.HasPrefix(key, "x-amz-meta-") {
metaKey := strings.TrimPrefix(key, "x-amz-meta-")
metaData[metaKey] = value[0]
}
}
@@ -6386,7 +6136,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
o.setMetaData(&head)
// Decompress body if necessary
if deref(removeAWSChunked(resp.ContentEncoding)) == "gzip" {
if deref(resp.ContentEncoding) == "gzip" {
if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) {
return readers.NewGzipReader(resp.Body)
}
@@ -6636,11 +6386,8 @@ func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
MultipartUpload: &types.CompletedMultipartUpload{
Parts: w.completedParts,
},
RequestPayer: w.multiPartUploadInput.RequestPayer,
SSECustomerAlgorithm: w.multiPartUploadInput.SSECustomerAlgorithm,
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
UploadId: w.uploadID,
RequestPayer: w.multiPartUploadInput.RequestPayer,
UploadId: w.uploadID,
})
return w.f.shouldRetry(ctx, err)
})
@@ -6668,9 +6415,9 @@ func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.R
return wantETag, gotETag, versionID, ui, err
}
s3cw := chunkWriter.(*s3ChunkWriter)
gotETag = *stringClone(s3cw.eTag)
versionID = stringClone(s3cw.versionID)
var s3cw *s3ChunkWriter = chunkWriter.(*s3ChunkWriter)
gotETag = s3cw.eTag
versionID = aws.String(s3cw.versionID)
hashOfHashes := md5.Sum(s3cw.md5s)
wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(s3cw.completedParts))
@@ -6702,8 +6449,8 @@ func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjec
}
lastModified = time.Now()
if resp != nil {
etag = *stringClone(deref(resp.ETag))
versionID = stringClonePointer(resp.VersionId)
etag = deref(resp.ETag)
versionID = resp.VersionId
}
return etag, lastModified, versionID, nil
}
@@ -6755,8 +6502,8 @@ func (o *Object) uploadSinglepartPresignedRequest(ctx context.Context, req *s3.P
if date, err := http.ParseTime(resp.Header.Get("Date")); err != nil {
lastModified = date
}
etag = *stringClone(resp.Header.Get("Etag"))
vID := *stringClone(resp.Header.Get("x-amz-version-id"))
etag = resp.Header.Get("Etag")
vID := resp.Header.Get("x-amz-version-id")
if vID != "" {
versionID = &vID
}
@@ -6810,7 +6557,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
case "content-disposition":
ui.req.ContentDisposition = pv
case "content-encoding":
ui.req.ContentEncoding = removeAWSChunked(pv)
ui.req.ContentEncoding = pv
case "content-language":
ui.req.ContentLanguage = pv
case "content-type":
@@ -6907,7 +6654,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
case "content-disposition":
ui.req.ContentDisposition = aws.String(value)
case "content-encoding":
ui.req.ContentEncoding = removeAWSChunked(aws.String(value))
ui.req.ContentEncoding = aws.String(value)
case "content-language":
ui.req.ContentLanguage = aws.String(value)
case "content-type":

View File

@@ -248,47 +248,6 @@ func TestMergeDeleteMarkers(t *testing.T) {
}
}
func TestRemoveAWSChunked(t *testing.T) {
ps := func(s string) *string {
return &s
}
tests := []struct {
name string
in *string
want *string
}{
{"nil", nil, nil},
{"empty", ps(""), nil},
{"only aws", ps("aws-chunked"), nil},
{"leading aws", ps("aws-chunked, gzip"), ps("gzip")},
{"trailing aws", ps("gzip, aws-chunked"), ps("gzip")},
{"middle aws", ps("gzip, aws-chunked, br"), ps("gzip,br")},
{"case insensitive", ps("GZip, AwS-ChUnKeD, Br"), ps("GZip,Br")},
{"duplicates", ps("aws-chunked , aws-chunked"), nil},
{"no aws normalize spaces", ps(" gzip , br "), ps(" gzip , br ")},
{"surrounding spaces", ps(" aws-chunked "), nil},
{"no change", ps("gzip, br"), ps("gzip, br")},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
got := removeAWSChunked(tc.in)
check := func(want, got *string) {
t.Helper()
if want == nil {
assert.Nil(t, got)
} else {
require.NotNil(t, got)
assert.Equal(t, *want, *got)
}
}
check(tc.want, got)
// Idempotent
got2 := removeAWSChunked(got)
check(got, got2)
})
}
}
func (f *Fs) InternalTestVersions(t *testing.T) {
ctx := context.Background()

View File

@@ -111,8 +111,7 @@ func init() {
encoder.EncodeSlash |
encoder.EncodeBackSlash |
encoder.EncodeDoubleQuote |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
encoder.EncodeInvalidUtf8),
}},
})
}

View File

@@ -10,7 +10,6 @@ import (
"fmt"
"io"
iofs "io/fs"
"net/url"
"os"
"path"
"regexp"
@@ -222,45 +221,15 @@ E.g. the second example above should be rewritten as:
Help: "Windows Command Prompt",
},
},
}, {
Name: "hashes",
Help: `Comma separated list of supported checksum types.`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
Name: "md5sum_command",
Default: "",
Help: "The command used to read MD5 hashes.\n\nLeave blank for autodetect.",
Help: "The command used to read md5 hashes.\n\nLeave blank for autodetect.",
Advanced: true,
}, {
Name: "sha1sum_command",
Default: "",
Help: "The command used to read SHA-1 hashes.\n\nLeave blank for autodetect.",
Advanced: true,
}, {
Name: "crc32sum_command",
Default: "",
Help: "The command used to read CRC-32 hashes.\n\nLeave blank for autodetect.",
Advanced: true,
}, {
Name: "sha256sum_command",
Default: "",
Help: "The command used to read SHA-256 hashes.\n\nLeave blank for autodetect.",
Advanced: true,
}, {
Name: "blake3sum_command",
Default: "",
Help: "The command used to read BLAKE3 hashes.\n\nLeave blank for autodetect.",
Advanced: true,
}, {
Name: "xxh3sum_command",
Default: "",
Help: "The command used to read XXH3 hashes.\n\nLeave blank for autodetect.",
Advanced: true,
}, {
Name: "xxh128sum_command",
Default: "",
Help: "The command used to read XXH128 hashes.\n\nLeave blank for autodetect.",
Help: "The command used to read sha1 hashes.\n\nLeave blank for autodetect.",
Advanced: true,
}, {
Name: "skip_links",
@@ -513,14 +482,6 @@ Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "http_proxy",
Default: "",
Help: `URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
Advanced: true,
}, {
Name: "copy_is_hardlink",
Default: false,
@@ -565,14 +526,8 @@ type Options struct {
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
ShellType string `config:"shell_type"`
Hashes fs.CommaSepList `config:"hashes"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
Crc32sumCommand string `config:"crc32sum_command"`
Sha256sumCommand string `config:"sha256sum_command"`
Blake3sumCommand string `config:"blake3sum_command"`
Xxh3sumCommand string `config:"xxh3sum_command"`
Xxh128sumCommand string `config:"xxh128sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
@@ -590,7 +545,6 @@ type Options struct {
HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
SSH fs.SpaceSepList `config:"ssh"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
CopyIsHardlink bool `config:"copy_is_hardlink"`
}
@@ -616,23 +570,17 @@ type Fs struct {
savedpswd string
sessions atomic.Int32 // count in use sessions
tokens *pacer.TokenDispenser
proxyURL *url.URL // address of HTTP proxy read from environment
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
fs *Fs
remote string
size int64 // size of the object
modTime uint32 // modification time of the object as unix time
mode os.FileMode // mode bits from the file
md5sum *string // Cached MD5 checksum
sha1sum *string // Cached SHA-1 checksum
crc32sum *string // Cached CRC-32 checksum
sha256sum *string // Cached SHA-256 checksum
blake3sum *string // Cached BLAKE3 checksum
xxh3sum *string // Cached XXH3 checksum
xxh128sum *string // Cached XXH128 checksum
fs *Fs
remote string
size int64 // size of the object
modTime uint32 // modification time of the object as unix time
mode os.FileMode // mode bits from the file
md5sum *string // Cached MD5 checksum
sha1sum *string // Cached SHA1 checksum
}
// conn encapsulates an ssh client and corresponding sftp client
@@ -919,20 +867,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt.Port = "22"
}
// get proxy URL if set
if opt.HTTPProxy != "" {
proxyURL, err := url.Parse(opt.HTTPProxy)
if err != nil {
return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
}
f.proxyURL = proxyURL
}
sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
Timeout: time.Duration(f.ci.ConnectTimeout),
Timeout: f.ci.ConnectTimeout,
ClientVersion: "SSH-2.0-" + f.ci.UserAgent,
}
@@ -1684,112 +1623,14 @@ func (f *Fs) Hashes() hash.Set {
return *f.cachedHashes
}
hashTypesSupported := hash.NewHashSet()
f.cachedHashes = &hashTypesSupported
hashSet := hash.NewHashSet()
f.cachedHashes = &hashSet
if f.opt.DisableHashCheck || f.shellType == shellTypeNotSupported {
return hashTypesSupported
}
hashTypes := hash.NewHashSet()
if len(f.opt.Hashes) > 0 {
for _, hashName := range f.opt.Hashes {
var hashType hash.Type
if err := hashType.Set(hashName); err != nil {
fs.Infof(nil, "Invalid token %q in hash string %q", hashName, f.opt.Hashes.String())
}
hashTypes.Add(hashType)
}
} else {
hashTypes.Add(hash.MD5, hash.SHA1)
}
hashCommands := map[hash.Type]struct {
option *string
emptyHash string
hashCommands []struct{ hashFile, hashEmpty string }
}{
hash.MD5: {
&f.opt.Md5sumCommand,
"d41d8cd98f00b204e9800998ecf8427e",
[]struct{ hashFile, hashEmpty string }{
{"md5sum", "md5sum"},
{"md5 -r", "md5 -r"},
{"rclone md5sum", "rclone md5sum"},
},
},
hash.SHA1: {
&f.opt.Sha1sumCommand,
"da39a3ee5e6b4b0d3255bfef95601890afd80709",
[]struct{ hashFile, hashEmpty string }{
{"sha1sum", "sha1sum"},
{"sha1 -r", "sha1 -r"},
{"rclone sha1sum", "rclone sha1sum"},
},
},
hash.CRC32: {
&f.opt.Sha1sumCommand,
"00000000",
[]struct{ hashFile, hashEmpty string }{
{"crc32", "crc32"},
{"rclone hashsum crc32", "rclone hashsum crc32"},
},
},
hash.SHA256: {
&f.opt.Sha256sumCommand,
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
[]struct{ hashFile, hashEmpty string }{
{"sha256sum", "sha1sum"},
{"sha256 -r", "sha1 -r"},
{"rclone hashsum sha256", "rclone hashsum sha256"},
},
},
hash.BLAKE3: {
&f.opt.Blake3sumCommand,
"af1349b9f5f9a1a6a0404dea36dcc9499bcb25c9adc112b7cc9a93cae41f3262",
[]struct{ hashFile, hashEmpty string }{
{"b3sum", "b3sum"},
{"rclone hashsum blake3", "rclone hashsum blake3"},
},
},
hash.XXH3: {
&f.opt.Xxh3sumCommand,
"2d06800538d394c2",
[]struct{ hashFile, hashEmpty string }{
// The xxhsum tool uses a non-standard prefix "XXH3_" preceding the hash output for the 64-bit variant
// of XXH3, to avoid confusion with the older 64-bit algorithm XXH64. This was introduced in version
// 0.8.3 released Dec 30, 2024. Older versions only supported the alternative BSD style output format,
// otherwise optional with argument --tag. We do not currently expect these output formats and
// therefore cannot use the "xxhsum -H3" command or its xxh3sum alias directly.
//{"xxh3sum", "xxh3sum"},
//{"xxhsum -H3", "xxhsum -H3"},
{"rclone hashsum xxh3", "rclone hashsum xxh3"},
},
},
hash.XXH128: {
&f.opt.Xxh128sumCommand,
"99aa06d3014798d86001c324468d497f",
[]struct{ hashFile, hashEmpty string }{
{"xxh128sum", "xxh128sum"},
{"xxhsum -H2", "xxhsum -H2"},
{"rclone hashsum xxh128", "rclone hashsum xxh128"},
},
},
}
if f.shellType == "powershell" {
for _, hashType := range []hash.Type{hash.MD5, hash.SHA1, hash.SHA256} {
if entry, ok := hashCommands[hashType]; ok {
entry.hashCommands = append(hashCommands[hashType].hashCommands, struct {
hashFile, hashEmpty string
}{
fmt.Sprintf("&{param($Path);Get-FileHash -Algorithm %v -LiteralPath $Path -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{\"$($_.ToLower()) ${Path}\"}}", hashType),
fmt.Sprintf("Get-FileHash -Algorithm %v -InputStream ([System.IO.MemoryStream]::new()) -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{$_.ToLower()}", hashType),
})
hashCommands[hashType] = entry
}
}
return hashSet
}
// look for a hash command which works
checkHash := func(hashType hash.Type, commands []struct{ hashFile, hashEmpty string }, expected string, hashCommand *string, changed *bool) bool {
if *hashCommand == hashCommandNotSupported {
return false
@@ -1818,25 +1659,55 @@ func (f *Fs) Hashes() hash.Set {
}
changed := false
for _, hashType := range hashTypes.Array() {
if entry, ok := hashCommands[hashType]; ok {
if works := checkHash(hashType, entry.hashCommands, entry.emptyHash, entry.option, &changed); works {
hashTypesSupported.Add(hashType)
}
}
md5Commands := []struct {
hashFile, hashEmpty string
}{
{"md5sum", "md5sum"},
{"md5 -r", "md5 -r"},
{"rclone md5sum", "rclone md5sum"},
}
sha1Commands := []struct {
hashFile, hashEmpty string
}{
{"sha1sum", "sha1sum"},
{"sha1 -r", "sha1 -r"},
{"rclone sha1sum", "rclone sha1sum"},
}
if f.shellType == "powershell" {
md5Commands = append(md5Commands, struct {
hashFile, hashEmpty string
}{
"&{param($Path);Get-FileHash -Algorithm MD5 -LiteralPath $Path -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{\"$($_.ToLower()) ${Path}\"}}",
"Get-FileHash -Algorithm MD5 -InputStream ([System.IO.MemoryStream]::new()) -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{$_.ToLower()}",
})
sha1Commands = append(sha1Commands, struct {
hashFile, hashEmpty string
}{
"&{param($Path);Get-FileHash -Algorithm SHA1 -LiteralPath $Path -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{\"$($_.ToLower()) ${Path}\"}}",
"Get-FileHash -Algorithm SHA1 -InputStream ([System.IO.MemoryStream]::new()) -ErrorAction Stop|Select-Object -First 1 -ExpandProperty Hash|ForEach-Object{$_.ToLower()}",
})
}
md5Works := checkHash(hash.MD5, md5Commands, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash(hash.SHA1, sha1Commands, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
if changed {
// Save permanently in config to avoid the extra work next time
for _, hashType := range hashTypes.Array() {
if entry, ok := hashCommands[hashType]; ok {
fs.Debugf(f, "Setting hash command for %v to %q (set %vsum_command to override)", hashType, *entry.option, hashType)
f.m.Set(fmt.Sprintf("%vsum_command", hashType), *entry.option)
}
}
fs.Debugf(f, "Setting hash command for %v to %q (set sha1sum_command to override)", hash.MD5, f.opt.Md5sumCommand)
f.m.Set("md5sum_command", f.opt.Md5sumCommand)
fs.Debugf(f, "Setting hash command for %v to %q (set md5sum_command to override)", hash.SHA1, f.opt.Sha1sumCommand)
f.m.Set("sha1sum_command", f.opt.Sha1sumCommand)
}
return hashTypesSupported
if sha1Works {
hashSet.Add(hash.SHA1)
}
if md5Works {
hashSet.Add(hash.MD5)
}
return hashSet
}
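// Minimal sketch of the detection idea above (the run helper is assumed, not
// part of this change): a candidate command is accepted only if hashing an
// empty file yields the algorithm's well-known empty-input digest.
func probeHashCommand(run func(cmd string) ([]byte, error), cmd, emptyHash string) bool {
	out, err := run(cmd) // assumed: runs "cmd <emptyfile>" on the remote shell
	if err != nil {
		return false
	}
	return parseHash(out) == emptyHash
}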
// About gets usage stats
@@ -1863,9 +1734,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
free := vfsStats.FreeSpace()
used := total - free
return &fs.Usage{
Total: fs.NewUsageValue(total),
Used: fs.NewUsageValue(used),
Free: fs.NewUsageValue(free),
Total: fs.NewUsageValue(int64(total)),
Used: fs.NewUsageValue(int64(used)),
Free: fs.NewUsageValue(int64(free)),
}, nil
} else if err != nil {
if errors.Is(err, os.ErrNotExist) {
@@ -1971,43 +1842,17 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
_ = o.fs.Hashes()
var hashCmd string
switch r {
case hash.MD5:
if r == hash.MD5 {
if o.md5sum != nil {
return *o.md5sum, nil
}
hashCmd = o.fs.opt.Md5sumCommand
case hash.SHA1:
} else if r == hash.SHA1 {
if o.sha1sum != nil {
return *o.sha1sum, nil
}
hashCmd = o.fs.opt.Sha1sumCommand
case hash.CRC32:
if o.crc32sum != nil {
return *o.crc32sum, nil
}
hashCmd = o.fs.opt.Crc32sumCommand
case hash.SHA256:
if o.sha256sum != nil {
return *o.sha256sum, nil
}
hashCmd = o.fs.opt.Sha256sumCommand
case hash.BLAKE3:
if o.blake3sum != nil {
return *o.blake3sum, nil
}
hashCmd = o.fs.opt.Blake3sumCommand
case hash.XXH3:
if o.xxh3sum != nil {
return *o.xxh3sum, nil
}
hashCmd = o.fs.opt.Xxh3sumCommand
case hash.XXH128:
if o.xxh128sum != nil {
return *o.xxh128sum, nil
}
hashCmd = o.fs.opt.Xxh128sumCommand
default:
} else {
return "", hash.ErrUnsupported
}
if hashCmd == "" || hashCmd == hashCommandNotSupported {
@@ -2024,21 +1869,10 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
}
hashString := parseHash(outBytes)
fs.Debugf(o, "Parsed hash: %s", hashString)
switch r {
case hash.MD5:
if r == hash.MD5 {
o.md5sum = &hashString
case hash.SHA1:
} else if r == hash.SHA1 {
o.sha1sum = &hashString
case hash.CRC32:
o.crc32sum = &hashString
case hash.SHA256:
o.sha256sum = &hashString
case hash.BLAKE3:
o.blake3sum = &hashString
case hash.XXH3:
o.xxh3sum = &hashString
case hash.XXH128:
o.xxh128sum = &hashString
}
return hashString, nil
}
@@ -2103,7 +1937,7 @@ func (f *Fs) remoteShellPath(remote string) string {
}
// Converts a byte array from the SSH session returned by
// an invocation of hash command to a hash string
// an invocation of md5sum/sha1sum to a hash string
// as expected by the rest of this application
func parseHash(bytes []byte) string {
// For strings with backslash *sum writes a leading \
@@ -2332,11 +2166,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Clear the hash cache since we are about to update the object
o.md5sum = nil
o.sha1sum = nil
o.crc32sum = nil
o.sha256sum = nil
o.blake3sum = nil
o.xxh3sum = nil
o.xxh128sum = nil
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return fmt.Errorf("Update: %w", err)

View File

@@ -31,8 +31,6 @@ func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, ssh
)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
} else if f.proxyURL != nil {
conn, err = proxy.HTTPConnectDial(network, addr, f.proxyURL, baseDialer)
} else {
conn, err = baseDialer.Dial(network, addr)
}

View File

@@ -38,7 +38,7 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
d := &smb2.Dialer{}
if f.opt.UseKerberos {
cl, err := NewKerberosFactory().GetClient(f.opt.KerberosCCache)
cl, err := getKerberosClient()
if err != nil {
return nil, err
}

View File

@@ -1,99 +0,0 @@
package smb
import (
"context"
"fmt"
"os"
"sync"
"github.com/cloudsoda/go-smb2"
"golang.org/x/sync/errgroup"
)
// FsInterface defines the methods that filePool needs from Fs
type FsInterface interface {
getConnection(ctx context.Context, share string) (*conn, error)
putConnection(pc **conn, err error)
removeSession()
}
type file struct {
*smb2.File
c *conn
}
type filePool struct {
ctx context.Context
fs FsInterface
share string
path string
mu sync.Mutex
pool []*file
}
func newFilePool(ctx context.Context, fs FsInterface, share, path string) *filePool {
return &filePool{
ctx: ctx,
fs: fs,
share: share,
path: path,
}
}
func (p *filePool) get() (*file, error) {
p.mu.Lock()
if len(p.pool) > 0 {
f := p.pool[len(p.pool)-1]
p.pool = p.pool[:len(p.pool)-1]
p.mu.Unlock()
return f, nil
}
p.mu.Unlock()
c, err := p.fs.getConnection(p.ctx, p.share)
if err != nil {
return nil, err
}
fl, err := c.smbShare.OpenFile(p.path, os.O_WRONLY, 0o644)
if err != nil {
p.fs.putConnection(&c, err)
return nil, fmt.Errorf("failed to open: %w", err)
}
return &file{File: fl, c: c}, nil
}
func (p *filePool) put(f *file, err error) {
if f == nil {
return
}
if err != nil {
_ = f.Close()
p.fs.putConnection(&f.c, err)
return
}
p.mu.Lock()
p.pool = append(p.pool, f)
p.mu.Unlock()
}
func (p *filePool) drain() error {
p.mu.Lock()
files := p.pool
p.pool = nil
p.mu.Unlock()
g, _ := errgroup.WithContext(p.ctx)
for _, f := range files {
g.Go(func() error {
err := f.Close()
p.fs.putConnection(&f.c, err)
return err
})
}
return g.Wait()
}
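// Illustrative sketch (hypothetical caller, not part of this change): how a
// writer borrows and returns handles so that concurrent WriteAt calls reuse
// open files instead of reopening the remote file for every write.
func writeVia(p *filePool, buf []byte, off int64) (int, error) {
	f, err := p.get() // pooled handle, or a fresh open on a new connection
	if err != nil {
		return 0, err
	}
	n, werr := f.WriteAt(buf, off)
	p.put(f, werr) // recycled on success; closed and conn returned on error
	return n, werr
}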

View File

@@ -1,228 +0,0 @@
package smb
import (
"context"
"errors"
"sync"
"testing"
"github.com/cloudsoda/go-smb2"
"github.com/stretchr/testify/assert"
)
// Mock Fs that implements FsInterface
type mockFs struct {
mu sync.Mutex
putConnectionCalled bool
putConnectionErr error
getConnectionCalled bool
getConnectionErr error
getConnectionResult *conn
removeSessionCalled bool
}
func (m *mockFs) putConnection(pc **conn, err error) {
m.mu.Lock()
defer m.mu.Unlock()
m.putConnectionCalled = true
m.putConnectionErr = err
}
func (m *mockFs) getConnection(ctx context.Context, share string) (*conn, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.getConnectionCalled = true
if m.getConnectionErr != nil {
return nil, m.getConnectionErr
}
if m.getConnectionResult != nil {
return m.getConnectionResult, nil
}
return &conn{}, nil
}
func (m *mockFs) removeSession() {
m.mu.Lock()
defer m.mu.Unlock()
m.removeSessionCalled = true
}
func (m *mockFs) isPutConnectionCalled() bool {
m.mu.Lock()
defer m.mu.Unlock()
return m.putConnectionCalled
}
func (m *mockFs) getPutConnectionErr() error {
m.mu.Lock()
defer m.mu.Unlock()
return m.putConnectionErr
}
func (m *mockFs) isGetConnectionCalled() bool {
m.mu.Lock()
defer m.mu.Unlock()
return m.getConnectionCalled
}
func newMockFs() *mockFs {
return &mockFs{}
}
// Helper function to create a mock file
func newMockFile() *file {
return &file{
File: &smb2.File{},
c: &conn{},
}
}
// Test filePool creation
func TestNewFilePool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
share := "testshare"
path := "/test/path"
pool := newFilePool(ctx, fs, share, path)
assert.NotNil(t, pool)
assert.Equal(t, ctx, pool.ctx)
assert.Equal(t, fs, pool.fs)
assert.Equal(t, share, pool.share)
assert.Equal(t, path, pool.path)
assert.Empty(t, pool.pool)
}
// Test getting file from pool when pool has files
func TestFilePool_Get_FromPool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Add a mock file to the pool
mockFile := newMockFile()
pool.pool = append(pool.pool, mockFile)
// Get file from pool
f, err := pool.get()
assert.NoError(t, err)
assert.NotNil(t, f)
assert.Equal(t, mockFile, f)
assert.Empty(t, pool.pool)
}
// Test getting file when pool is empty
func TestFilePool_Get_EmptyPool(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
// Set up the mock to return an error from getConnection
// This tests that the pool calls getConnection when empty
fs.getConnectionErr = errors.New("connection failed")
pool := newFilePool(ctx, fs, "testshare", "test/path")
// This should call getConnection and return the error
f, err := pool.get()
assert.Error(t, err)
assert.Nil(t, f)
assert.True(t, fs.isGetConnectionCalled())
assert.Equal(t, "connection failed", err.Error())
}
// Test putting file successfully
func TestFilePool_Put_Success(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
mockFile := newMockFile()
pool.put(mockFile, nil)
assert.Len(t, pool.pool, 1)
assert.Equal(t, mockFile, pool.pool[0])
}
// Test putting file with error
func TestFilePool_Put_WithError(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
mockFile := newMockFile()
pool.put(mockFile, errors.New("write error"))
// Should call putConnection with error
assert.True(t, fs.isPutConnectionCalled())
assert.Equal(t, errors.New("write error"), fs.getPutConnectionErr())
assert.Empty(t, pool.pool)
}
// Test putting nil file
func TestFilePool_Put_NilFile(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Should not panic
pool.put(nil, nil)
pool.put(nil, errors.New("some error"))
assert.Empty(t, pool.pool)
}
// Test draining pool with files
func TestFilePool_Drain_WithFiles(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
// Add mock files to pool
mockFile1 := newMockFile()
mockFile2 := newMockFile()
pool.pool = append(pool.pool, mockFile1, mockFile2)
// Before draining
assert.Len(t, pool.pool, 2)
_ = pool.drain()
assert.Empty(t, pool.pool)
}
// Test concurrent access to pool
func TestFilePool_ConcurrentAccess(t *testing.T) {
ctx := context.Background()
fs := newMockFs()
pool := newFilePool(ctx, fs, "testshare", "/test/path")
const numGoroutines = 10
for range numGoroutines {
mockFile := newMockFile()
pool.pool = append(pool.pool, mockFile)
}
// Test concurrent get operations
done := make(chan bool, numGoroutines)
for range numGoroutines {
go func() {
defer func() { done <- true }()
f, err := pool.get()
if err == nil {
pool.put(f, nil)
}
}()
}
for range numGoroutines {
<-done
}
// Pool should be in a consistent state after the concurrent access
assert.Len(t, pool.pool, numGoroutines)
}

View File

@@ -7,132 +7,72 @@ import (
"path/filepath"
"strings"
"sync"
"time"
"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
)
// KerberosFactory encapsulates dependencies and caches for Kerberos clients.
type KerberosFactory struct {
// clientCache caches Kerberos clients keyed by resolved ccache path.
// Clients are reused unless the associated ccache file changes.
clientCache sync.Map // map[string]*client.Client
var (
kerberosClient *client.Client
kerberosErr error
kerberosOnce sync.Once
)
// errCache caches errors encountered when loading Kerberos clients.
// Prevents repeated attempts for paths that previously failed.
errCache sync.Map // map[string]error
// getKerberosClient returns a Kerberos client that can be used to authenticate.
func getKerberosClient() (*client.Client, error) {
if kerberosClient == nil || kerberosErr == nil {
kerberosOnce.Do(func() {
kerberosClient, kerberosErr = createKerberosClient()
})
}
// modTimeCache tracks the last known modification time of ccache files.
// Used to detect changes and trigger credential refresh.
modTimeCache sync.Map // map[string]time.Time
loadCCache func(string) (*credentials.CCache, error)
newClient func(*credentials.CCache, *config.Config, ...func(*client.Settings)) (*client.Client, error)
loadConfig func() (*config.Config, error)
return kerberosClient, kerberosErr
}
// NewKerberosFactory creates a new instance of KerberosFactory with default dependencies.
func NewKerberosFactory() *KerberosFactory {
return &KerberosFactory{
loadCCache: credentials.LoadCCache,
newClient: client.NewFromCCache,
loadConfig: defaultLoadKerberosConfig,
}
}
// GetClient returns a cached Kerberos client or creates a new one if needed.
func (kf *KerberosFactory) GetClient(ccachePath string) (*client.Client, error) {
resolvedPath, err := resolveCcachePath(ccachePath)
if err != nil {
return nil, err
}
stat, err := os.Stat(resolvedPath)
if err != nil {
kf.errCache.Store(resolvedPath, err)
return nil, err
}
mtime := stat.ModTime()
if oldMod, ok := kf.modTimeCache.Load(resolvedPath); ok {
if oldTime, ok := oldMod.(time.Time); ok && oldTime.Equal(mtime) {
if errVal, ok := kf.errCache.Load(resolvedPath); ok {
return nil, errVal.(error)
}
if clientVal, ok := kf.clientCache.Load(resolvedPath); ok {
return clientVal.(*client.Client), nil
}
}
}
// Load Kerberos config
cfg, err := kf.loadConfig()
if err != nil {
kf.errCache.Store(resolvedPath, err)
return nil, err
}
// Load ccache
ccache, err := kf.loadCCache(resolvedPath)
if err != nil {
kf.errCache.Store(resolvedPath, err)
return nil, err
}
// Create new client
cl, err := kf.newClient(ccache, cfg)
if err != nil {
kf.errCache.Store(resolvedPath, err)
return nil, err
}
// Cache and return
kf.clientCache.Store(resolvedPath, cl)
kf.errCache.Delete(resolvedPath)
kf.modTimeCache.Store(resolvedPath, mtime)
return cl, nil
}
// resolveCcachePath resolves the KRB5 ccache path.
func resolveCcachePath(ccachePath string) (string, error) {
if ccachePath == "" {
ccachePath = os.Getenv("KRB5CCNAME")
}
switch {
case strings.Contains(ccachePath, ":"):
parts := strings.SplitN(ccachePath, ":", 2)
prefix, path := parts[0], parts[1]
switch prefix {
case "FILE":
return path, nil
case "DIR":
primary, err := os.ReadFile(filepath.Join(path, "primary"))
if err != nil {
return "", err
}
return filepath.Join(path, strings.TrimSpace(string(primary))), nil
default:
return "", fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
}
case ccachePath == "":
u, err := user.Current()
if err != nil {
return "", err
}
return "/tmp/krb5cc_" + u.Uid, nil
default:
return ccachePath, nil
}
}
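// Resolution examples for the rules above (paths illustrative):
//
//	resolveCcachePath("FILE:/tmp/krb5cc_1000") // -> "/tmp/krb5cc_1000"
//	resolveCcachePath("DIR:/var/krb5cc")       // -> "/var/krb5cc/" + contents of "primary"
//	resolveCcachePath("/tmp/myccache")         // -> "/tmp/myccache"
//	resolveCcachePath("")                      // -> $KRB5CCNAME if set, else "/tmp/krb5cc_<uid>"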
// defaultLoadKerberosConfig loads Kerberos config from default or env path.
func defaultLoadKerberosConfig() (*config.Config, error) {
// createKerberosClient creates a new Kerberos client.
func createKerberosClient() (*client.Client, error) {
cfgPath := os.Getenv("KRB5_CONFIG")
if cfgPath == "" {
cfgPath = "/etc/krb5.conf"
}
return config.Load(cfgPath)
cfg, err := config.Load(cfgPath)
if err != nil {
return nil, err
}
// Determine the ccache location from the environment, falling back to the
// default location.
ccachePath := os.Getenv("KRB5CCNAME")
switch {
case strings.Contains(ccachePath, ":"):
parts := strings.SplitN(ccachePath, ":", 2)
switch parts[0] {
case "FILE":
ccachePath = parts[1]
case "DIR":
primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
if err != nil {
return nil, err
}
ccachePath = filepath.Join(parts[1], strings.TrimSpace(string(primary)))
default:
return nil, fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
}
case ccachePath == "":
u, err := user.Current()
if err != nil {
return nil, err
}
ccachePath = "/tmp/krb5cc_" + u.Uid
}
ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
return nil, err
}
return client.NewFromCCache(ccache, cfg)
}

View File

@@ -1,142 +0,0 @@
package smb
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
"github.com/stretchr/testify/assert"
)
func TestResolveCcachePath(t *testing.T) {
tmpDir := t.TempDir()
// Setup: files for FILE and DIR modes
fileCcache := filepath.Join(tmpDir, "file_ccache")
err := os.WriteFile(fileCcache, []byte{}, 0600)
assert.NoError(t, err)
dirCcache := filepath.Join(tmpDir, "dir_ccache")
err = os.Mkdir(dirCcache, 0755)
assert.NoError(t, err)
err = os.WriteFile(filepath.Join(dirCcache, "primary"), []byte("ticket"), 0600)
assert.NoError(t, err)
dirCcacheTicket := filepath.Join(dirCcache, "ticket")
err = os.WriteFile(dirCcacheTicket, []byte{}, 0600)
assert.NoError(t, err)
tests := []struct {
name string
ccachePath string
envKRB5CCNAME string
expected string
expectError bool
}{
{
name: "FILE: prefix from env",
ccachePath: "",
envKRB5CCNAME: "FILE:" + fileCcache,
expected: fileCcache,
},
{
name: "DIR: prefix from env",
ccachePath: "",
envKRB5CCNAME: "DIR:" + dirCcache,
expected: dirCcacheTicket,
},
{
name: "Unsupported prefix",
ccachePath: "",
envKRB5CCNAME: "MEMORY:/bad/path",
expectError: true,
},
{
name: "Direct file path (no prefix)",
ccachePath: "/tmp/myccache",
expected: "/tmp/myccache",
},
{
name: "Default to /tmp/krb5cc_<uid>",
ccachePath: "",
envKRB5CCNAME: "",
expected: "/tmp/krb5cc_",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Setenv("KRB5CCNAME", tt.envKRB5CCNAME)
result, err := resolveCcachePath(tt.ccachePath)
if tt.expectError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.Contains(t, result, tt.expected)
}
})
}
}
func TestKerberosFactory_GetClient_ReloadOnCcacheChange(t *testing.T) {
// Create temp ccache file
tmpFile, err := os.CreateTemp("", "krb5cc_test")
assert.NoError(t, err)
defer func() {
if err := os.Remove(tmpFile.Name()); err != nil {
t.Logf("Failed to remove temp file %s: %v", tmpFile.Name(), err)
}
}()
unixPath := filepath.ToSlash(tmpFile.Name())
ccachePath := "FILE:" + unixPath
initialContent := []byte("CCACHE_VERSION 4\n")
_, err = tmpFile.Write(initialContent)
assert.NoError(t, err)
assert.NoError(t, tmpFile.Close())
// Setup mocks
loadCallCount := 0
mockLoadCCache := func(path string) (*credentials.CCache, error) {
loadCallCount++
return &credentials.CCache{}, nil
}
mockNewClient := func(cc *credentials.CCache, cfg *config.Config, opts ...func(*client.Settings)) (*client.Client, error) {
return &client.Client{}, nil
}
mockLoadConfig := func() (*config.Config, error) {
return &config.Config{}, nil
}
factory := &KerberosFactory{
loadCCache: mockLoadCCache,
newClient: mockNewClient,
loadConfig: mockLoadConfig,
}
// First call — triggers loading
_, err = factory.GetClient(ccachePath)
assert.NoError(t, err)
assert.Equal(t, 1, loadCallCount, "expected 1 load call")
// Second call — should reuse cache, no additional load
_, err = factory.GetClient(ccachePath)
assert.NoError(t, err)
assert.Equal(t, 1, loadCallCount, "expected cached reuse, no new load")
// Simulate file update
time.Sleep(1 * time.Second) // ensure mtime changes
err = os.WriteFile(tmpFile.Name(), []byte("CCACHE_VERSION 4\n#updated"), 0600)
assert.NoError(t, err)
// Third call — should detect change, reload
_, err = factory.GetClient(ccachePath)
assert.NoError(t, err)
assert.Equal(t, 2, loadCallCount, "expected reload on changed ccache")
}

View File

@@ -3,7 +3,6 @@ package smb
import (
"context"
"errors"
"fmt"
"io"
"os"
@@ -108,20 +107,6 @@ Set to 0 to keep connections indefinitely.
Help: "Whether the server is configured to be case-insensitive.\n\nAlways true on Windows shares.",
Default: true,
Advanced: true,
}, {
Name: "kerberos_ccache",
Help: `Path to the Kerberos credential cache (krb5cc).
Overrides the default KRB5CCNAME environment variable and allows this
instance of the SMB backend to use a different Kerberos cache file.
This is useful when mounting multiple SMB remotes with different credentials
or running in multi-user environments.
Supported formats:
- FILE:/path/to/ccache Use the specified file.
- DIR:/path/to/ccachedir Use the primary file inside the specified directory.
- /path/to/ccache Interpreted as a file path.`,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -152,7 +137,6 @@ type Options struct {
Domain string `config:"domain"`
SPN string `config:"spn"`
UseKerberos bool `config:"use_kerberos"`
KerberosCCache string `config:"kerberos_ccache"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`
@@ -495,82 +479,22 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
return nil, err
}
bs := stat.BlockSize()
bs := int64(stat.BlockSize())
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * stat.TotalBlockCount()),
Used: fs.NewUsageValue(bs * (stat.TotalBlockCount() - stat.FreeBlockCount())),
Free: fs.NewUsageValue(bs * stat.AvailableBlockCount()),
Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
Used: fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
Free: fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
}
return usage, nil
}
type smbWriterAt struct {
pool *filePool
closed bool
closeMu sync.Mutex
wg sync.WaitGroup
}
func (w *smbWriterAt) WriteAt(p []byte, off int64) (int, error) {
w.closeMu.Lock()
if w.closed {
w.closeMu.Unlock()
return 0, errors.New("writer already closed")
}
w.wg.Add(1)
w.closeMu.Unlock()
defer w.wg.Done()
f, err := w.pool.get()
if err != nil {
return 0, fmt.Errorf("failed to get file from pool: %w", err)
}
n, writeErr := f.WriteAt(p, off)
w.pool.put(f, writeErr)
if writeErr != nil {
return n, fmt.Errorf("failed to write at offset %d: %w", off, writeErr)
}
return n, writeErr
}
func (w *smbWriterAt) Close() error {
w.closeMu.Lock()
defer w.closeMu.Unlock()
if w.closed {
return nil
}
w.closed = true
// Wait for all pending writes to finish
w.wg.Wait()
var errs []error
// Drain the pool
if err := w.pool.drain(); err != nil {
errs = append(errs, fmt.Errorf("failed to drain file pool: %w", err))
}
// Remove session
w.pool.fs.removeSession()
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
var err error
o := &Object{
fs: f,
remote: remote,
@@ -580,42 +504,27 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
return nil, fs.ErrorIsDir
}
err := o.fs.ensureDirectory(ctx, share, filename)
err = o.fs.ensureDirectory(ctx, share, filename)
if err != nil {
return nil, fmt.Errorf("failed to make parent directories: %w", err)
}
smbPath := o.fs.toSambaPath(filename)
filename = o.fs.toSambaPath(filename)
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
// One-time truncate
cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return nil, err
}
file, err := cn.smbShare.OpenFile(smbPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
o.fs.putConnection(&cn, err)
return nil, err
return nil, fmt.Errorf("failed to open: %w", err)
}
if size > 0 {
if truncateErr := file.Truncate(size); truncateErr != nil {
_ = file.Close()
o.fs.putConnection(&cn, truncateErr)
return nil, fmt.Errorf("failed to truncate file: %w", truncateErr)
}
}
if closeErr := file.Close(); closeErr != nil {
o.fs.putConnection(&cn, closeErr)
return nil, fmt.Errorf("failed to close file after truncate: %w", closeErr)
}
o.fs.putConnection(&cn, nil)
// Add a new session
o.fs.addSession()
return &smbWriterAt{
pool: newFilePool(ctx, o.fs, share, smbPath),
}, nil
return fl, nil
}
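// Hypothetical caller sketch (names and sizes illustrative): the shape of the
// multi-threaded upload this WriterAt serves, with several goroutines writing
// disjoint ranges through one handle pool, then a single Close to drain it.
func uploadChunks(ctx context.Context, f *Fs, remote string, chunks [][]byte, chunkSize int64) error {
	w, err := f.OpenWriterAt(ctx, remote, int64(len(chunks))*chunkSize)
	if err != nil {
		return err
	}
	g, _ := errgroup.WithContext(ctx) // errgroup assumed to be imported
	for i, c := range chunks {
		off := int64(i) * chunkSize
		g.Go(func() error {
			_, err := w.WriteAt(c, off)
			return err
		})
	}
	werr := g.Wait()
	if cerr := w.Close(); werr == nil {
		werr = cerr
	}
	return werr
}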
// Shutdown the backend, closing any background tasks and any

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/rclone/rclone/backend/smb"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
@@ -19,9 +18,6 @@ func TestIntegration(t *testing.T) {
}
func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
krb5Dir := t.TempDir()
t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf"))
t.Setenv("KRB5CCNAME", filepath.Join(krb5Dir, "ccache"))
@@ -30,24 +26,3 @@ func TestIntegration2(t *testing.T) {
NilObject: (*smb.Object)(nil),
})
}
func TestIntegration3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
krb5Dir := t.TempDir()
t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf"))
ccache := filepath.Join(krb5Dir, "ccache")
t.Setenv("RCLONE_TEST_CUSTOM_CCACHE_LOCATION", ccache)
name := "TestSMBKerberosCcache"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":rclone",
NilObject: (*smb.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "kerberos_ccache", Value: ccache},
},
})
}

View File

@@ -491,8 +491,8 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: time.Duration(10 * ci.ConnectTimeout), // Use the timeouts in the transport
Timeout: time.Duration(10 * ci.Timeout), // Use the timeouts in the transport
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
FetchUntilEmptyPage: opt.FetchUntilEmptyPage,
PartialPageFetchThreshold: opt.PartialPageFetchThreshold,
@@ -773,20 +773,21 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if container == "" {
return fs.ErrorListBucketRequired
return nil, fs.ErrorListBucketRequired
}
// List the objects
err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
return callback(entry)
entries = append(entries, entry)
return nil
})
if err != nil {
return err
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
return nil
return entries, nil
}
// listContainers lists the containers
@@ -817,46 +818,14 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
}
return list.Flush()
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}
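// Illustrative sketch (hypothetical caller, not part of this change): ListP
// streams entries to the callback in tranches instead of building one big
// slice, which keeps memory flat on very large containers.
func printAll(ctx context.Context, f *Fs, dir string) error {
	return f.ListP(ctx, dir, func(entries fs.DirEntries) error {
		for _, entry := range entries {
			fmt.Println(entry.Remote()) // fmt assumed to be imported
		}
		return nil
	})
}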
// ListR lists the objects and directories of the Fs starting
@@ -1681,7 +1650,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Copier = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -1,7 +1,7 @@
// Package common defines code common to the union and the policies
//
// These need to be defined in a separate package to avoid import loops
package common //nolint:revive // Don't include revive when running golangci-lint because this triggers var-naming: avoid meaningless package names
package common
import "github.com/rclone/rclone/fs"

View File

@@ -21,6 +21,7 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath stri
ctx, cancel := context.WithCancel(ctx)
defer cancel()
for _, u := range upstreams {
u := u // Closure
go func() {
rfs := u.RootFs
remote := path.Join(u.RootPath, filePath)

View File

@@ -123,7 +123,7 @@ func (p *Prop) Hashes() (hashes map[hash.Type]string) {
hashes = make(map[hash.Type]string)
for _, checksums := range p.Checksums {
checksums = strings.ToLower(checksums)
for checksum := range strings.SplitSeq(checksums, " ") {
for _, checksum := range strings.Split(checksums, " ") {
switch {
case strings.HasPrefix(checksum, "sha1:"):
hashes[hash.SHA1] = checksum[5:]

View File

@@ -1,40 +0,0 @@
package webdav
import (
"errors"
"fmt"
)
var (
// ErrChunkSize is returned when the chunk size is zero
ErrChunkSize = errors.New("tus chunk size must be greater than zero")
// ErrNilLogger is returned when the logger is nil
ErrNilLogger = errors.New("tus logger can't be nil")
// ErrNilStore is returned when the store is nil
ErrNilStore = errors.New("tus store can't be nil if resume is enable")
// ErrNilUpload is returned when the upload is nil
ErrNilUpload = errors.New("tus upload can't be nil")
// ErrLargeUpload is returned when the upload body is to large
ErrLargeUpload = errors.New("tus upload body is to large")
// ErrVersionMismatch is returned when the tus protocol version is mismatching
ErrVersionMismatch = errors.New("tus protocol version mismatch")
// ErrOffsetMismatch is returned when the tus upload offset is mismatching
ErrOffsetMismatch = errors.New("tus upload offset mismatch")
// ErrUploadNotFound is returned when the tus upload is not found
ErrUploadNotFound = errors.New("tus upload not found")
// ErrResumeNotEnabled is returned when the tus resuming is not enabled
ErrResumeNotEnabled = errors.New("tus resuming not enabled")
// ErrFingerprintNotSet is returned when the tus fingerprint is not set
ErrFingerprintNotSet = errors.New("tus fingerprint not set")
)
// ClientError represents an error state of a client
type ClientError struct {
Code int
Body []byte
}
// Error returns an error string containing the client error code
func (c ClientError) Error() string {
return fmt.Sprintf("unexpected status code: %d", c.Code)
}


@@ -1,88 +0,0 @@
package webdav
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"strings"
)
// Metadata is a typedef for a string to string map to hold metadata
type Metadata map[string]string
// Upload is a struct containing the file status during upload
type Upload struct {
stream io.ReadSeeker
size int64
offset int64
Fingerprint string
Metadata Metadata
}
// Updates the Upload information based on offset.
func (u *Upload) updateProgress(offset int64) {
u.offset = offset
}
// Finished returns whether this upload is finished or not.
func (u *Upload) Finished() bool {
return u.offset >= u.size
}
// Progress returns the progress in a percentage.
func (u *Upload) Progress() int64 {
return (u.offset * 100) / u.size
}
// Offset returns the current upload offset.
func (u *Upload) Offset() int64 {
return u.offset
}
// Size returns the size of the upload body.
func (u *Upload) Size() int64 {
return u.size
}
// EncodedMetadata encodes the upload metadata.
func (u *Upload) EncodedMetadata() string {
var encoded []string
for k, v := range u.Metadata {
encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(v)))
}
return strings.Join(encoded, ",")
}
func b64encode(s string) string {
return base64.StdEncoding.EncodeToString([]byte(s))
}
// NewUpload creates a new upload from an io.Reader.
func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
stream, ok := reader.(io.ReadSeeker)
if !ok {
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(reader)
if err != nil {
return nil
}
stream = bytes.NewReader(buf.Bytes())
}
if metadata == nil {
metadata = make(Metadata)
}
return &Upload{
stream: stream,
size: size,
Fingerprint: fingerprint,
Metadata: metadata,
}
}
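
Illustrative use of the constructor above to wrap a local file for upload. The helper name and metadata key are assumptions; note that *os.File is already an io.ReadSeeker, so no extra buffering happens, whereas a plain io.Reader would be read fully into memory. Assumes imports of errors, os and path/filepath.

func uploadForFile(path string) (*Upload, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	info, err := f.Stat()
	if err != nil {
		return nil, err
	}
	meta := Metadata{"filename": filepath.Base(path)}
	// Empty fingerprint: resuming is not implemented by this client.
	u := NewUpload(f, info.Size(), meta, "")
	if u == nil {
		return nil, errors.New("buffering the reader failed")
	}
	return u, nil
}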


@@ -1,191 +0,0 @@
package webdav
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// Uploader holds all information about a currently running upload
type Uploader struct {
fs *Fs
url string
upload *Upload
offset int64
aborted bool
uploadSubs []chan Upload
notifyChan chan bool
overridePatchMethod bool
}
// NotifyUploadProgress subscribes to progress updates.
func (u *Uploader) NotifyUploadProgress(c chan Upload) {
u.uploadSubs = append(u.uploadSubs, c)
}
func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
if resp == nil {
return true, err
}
switch resp.StatusCode {
case 204:
if off, err := strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64); err == nil {
*newOff = off
return false, nil
}
return false, err
case 409:
return false, ErrOffsetMismatch
case 412:
return false, ErrVersionMismatch
case 413:
return false, ErrLargeUpload
}
return f.shouldRetry(ctx, resp, err)
}
func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
var method string
if !u.overridePatchMethod {
method = "PATCH"
} else {
method = "POST"
}
extraHeaders := map[string]string{} // FIXME: Use extraHeaders(ctx, src) from Object maybe?
extraHeaders["Upload-Offset"] = strconv.FormatInt(offset, 10)
extraHeaders["Tus-Resumable"] = "1.0.0"
extraHeaders["filetype"] = u.upload.Metadata["filetype"]
if u.overridePatchMethod {
extraHeaders["X-HTTP-Method-Override"] = "PATCH"
}
url, err := url.Parse(u.url)
if err != nil {
return 0, fmt.Errorf("upload Chunk failed, could not parse url")
}
// FIXME: Use GetBody func as in chunking.go
opts := rest.Opts{
Method: method,
Path: url.Path,
NoResponse: true,
RootURL: fmt.Sprintf("%s://%s", url.Scheme, url.Host),
ContentLength: &size,
Body: body,
ContentType: "application/offset+octet-stream",
ExtraHeaders: extraHeaders,
Options: options,
}
var newOffset int64
err = u.fs.pacer.CallNoRetry(func() (bool, error) {
res, err := u.fs.srv.Call(ctx, &opts)
return u.fs.shouldRetryChunk(ctx, res, err, &newOffset)
})
if err != nil {
return 0, fmt.Errorf("uploadChunk failed: %w", err)
// FIXME What do we do here? Remove the entire upload?
// See https://github.com/tus/tusd/issues/176
}
return newOffset, nil
}
// Upload uploads the entire body to the server.
func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
cnt := 1
fs.Debug(u.fs, "Upload starts")
for u.offset < u.upload.size && !u.aborted {
err := u.UploadChunk(ctx, cnt, options...)
cnt++
if err != nil {
return err
}
}
fs.Debug(u.fs, "-- Upload finished")
return nil
}
// UploadChunk uploads a single chunk.
func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenOption) error {
chunkSize := u.fs.opt.ChunkSize
data := make([]byte, chunkSize)
_, err := u.upload.stream.Seek(u.offset, 0)
if err != nil {
fs.Errorf(u.fs, "Chunk %d: Error seek in stream failed: %v", cnt, err)
return err
}
size, err := u.upload.stream.Read(data)
if err != nil {
fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data strem: %v", cnt, err)
return err
}
body := bytes.NewBuffer(data[:size])
newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)
if err == nil {
fs.Debugf(u.fs, "Uploaded chunk %d ok, range %d -> %d", cnt, u.offset, newOffset)
} else {
fs.Errorf(u.fs, "Upload of chunk %d failed: %v", cnt, err)
return err
}
u.offset = newOffset
u.upload.updateProgress(u.offset)
u.notifyChan <- true
return nil
}
// Waits for a signal to broadcast to all subscribers
func (u *Uploader) broadcastProgress() {
for range u.notifyChan {
for _, c := range u.uploadSubs {
c <- *u.upload
}
}
}
// NewUploader creates a new Uploader.
func NewUploader(f *Fs, url string, upload *Upload, offset int64) *Uploader {
notifyChan := make(chan bool)
uploader := &Uploader{
f,
url,
upload,
offset,
false,
nil,
notifyChan,
false,
}
go uploader.broadcastProgress()
return uploader
}
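
The uploader above follows the tus core protocol: each chunk is sent as a PATCH carrying the current Upload-Offset, and the server answers 204 with the new offset. A dependency-free sketch of that single exchange using net/http; the function and its parameters are placeholders, while rclone itself routes the call through its rest client and pacer as shown above:

func patchChunk(ctx context.Context, c *http.Client, uploadURL string, offset int64, chunk []byte) (int64, error) {
	req, err := http.NewRequestWithContext(ctx, "PATCH", uploadURL, bytes.NewReader(chunk))
	if err != nil {
		return 0, err
	}
	req.Header.Set("Content-Type", "application/offset+octet-stream")
	req.Header.Set("Tus-Resumable", "1.0.0")
	req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10))
	resp, err := c.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent { // tus servers reply 204 on success
		return 0, fmt.Errorf("unexpected status %s", resp.Status)
	}
	// The server reports how far it got; the next chunk resumes from there.
	return strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64)
}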


@@ -1,108 +0,0 @@
package webdav
/*
Chunked upload based on the tus protocol for ownCloud Infinite Scale
See https://tus.io/protocols/resumable-upload
*/
import (
"context"
"fmt"
"io"
"net/http"
"path/filepath"
"strconv"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType string, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
fn := filepath.Base(src.Remote())
metadata := map[string]string{
"filename": fn,
"mtime": strconv.FormatInt(src.ModTime(ctx).Unix(), 10),
"filetype": contentType,
}
// Fingerprint is used to identify the upload when resuming. That is not yet implemented
fingerprint := ""
// create an upload from a file.
upload := NewUpload(in, src.Size(), metadata, fingerprint)
// create the uploader.
uploader, err := o.CreateUploader(ctx, upload, options...)
if err == nil {
// start the uploading process.
err = uploader.Upload(ctx, options...)
}
return err
}
func (f *Fs) getTusLocationOrRetry(ctx context.Context, resp *http.Response, err error) (bool, string, error) {
switch resp.StatusCode {
case 201:
location := resp.Header.Get("Location")
return false, location, nil
case 412:
return false, "", ErrVersionMismatch
case 413:
return false, "", ErrLargeUpload
}
retry, err := f.shouldRetry(ctx, resp, err)
return retry, "", err
}
// CreateUploader creates a new upload to the server.
func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) {
if u == nil {
return nil, ErrNilUpload
}
// if c.Config.Resume && len(u.Fingerprint) == 0 {
// return nil, ErrFingerprintNotSet
// }
l := int64(0)
p := o.filePath()
// cut the filename off
dir, _ := filepath.Split(p)
if dir == "" {
dir = "/"
}
opts := rest.Opts{
Method: "POST",
Path: dir,
NoResponse: true,
RootURL: o.fs.endpointURL,
ContentLength: &l,
ExtraHeaders: o.extraHeaders(ctx, o),
Options: options,
}
opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10)
opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata()
opts.ExtraHeaders["Tus-Resumable"] = "1.0.0"
// opts.ExtraHeaders["mtime"] = strconv.FormatInt(src.ModTime(ctx).Unix(), 10)
var tusLocation string
// rclone http call
err := o.fs.pacer.CallNoRetry(func() (bool, error) {
var retry bool
res, err := o.fs.srv.Call(ctx, &opts)
retry, tusLocation, err = o.fs.getTusLocationOrRetry(ctx, res, err)
return retry, err
})
if err != nil {
return nil, fmt.Errorf("making upload directory failed: %w", err)
}
uploader := NewUploader(o.fs, tusLocation, u, 0)
return uploader, nil
}
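
The creation step is equally small: an empty-bodied POST announcing the total Upload-Length and the encoded metadata, answered with 201 and a Location header to PATCH against. A net/http sketch under the same assumptions as the chunk example above:

func createUpload(ctx context.Context, c *http.Client, endpoint string, size int64, encodedMeta string) (string, error) {
	req, err := http.NewRequestWithContext(ctx, "POST", endpoint, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Tus-Resumable", "1.0.0")
	req.Header.Set("Upload-Length", strconv.FormatInt(size, 10))
	if encodedMeta != "" {
		req.Header.Set("Upload-Metadata", encodedMeta) // as built by Upload.EncodedMetadata
	}
	resp, err := c.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return "", fmt.Errorf("unexpected status %s", resp.Status)
	}
	return resp.Header.Get("Location"), nil // upload URL for the subsequent PATCHes
}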


@@ -84,10 +84,7 @@ func init() {
Help: "Nextcloud",
}, {
Value: "owncloud",
Help: "Owncloud 10 PHP based WebDAV server",
}, {
Value: "infinitescale",
Help: "ownCloud Infinite Scale",
Help: "Owncloud",
}, {
Value: "sharepoint",
Help: "Sharepoint Online, authenticated by Microsoft account",
@@ -215,7 +212,6 @@ type Fs struct {
pacer *fs.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream
canTus bool // supports the TUS upload protocol
useOCMtime bool // set if can use X-OC-Mtime
propsetMtime bool // set if can use propset
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
@@ -636,15 +632,6 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
f.propsetMtime = true
f.hasOCMD5 = true
f.hasOCSHA1 = true
case "infinitescale":
f.precision = time.Second
f.useOCMtime = true
f.propsetMtime = true
f.hasOCMD5 = false
f.hasOCSHA1 = true
f.canChunk = false
f.canTus = true
f.opt.ChunkSize = 10 * fs.Mebi
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
@@ -1342,7 +1329,7 @@ func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx)
if err != nil {
fs.Infof(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
}
return o.size
@@ -1386,7 +1373,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
if err != nil {
fs.Infof(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
@@ -1512,21 +1499,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("Update mkParentDir failed: %w", err)
}
if o.fs.canTus { // supports the tus upload protocol, ie. InfiniteScale
fs.Debugf(src, "Update will use the tus protocol to upload")
contentType := fs.MimeType(ctx, src)
err = o.updateViaTus(ctx, in, contentType, src, options...)
if err != nil {
fs.Debug(src, "tus update failed.")
return fmt.Errorf("tus update failed: %w", err)
}
} else if o.shouldUseChunkedUpload(src) {
if o.fs.opt.Vendor == "nextcloud" {
fs.Debugf(src, "Update will use the chunked upload strategy")
err = o.updateChunked(ctx, in, src, options...)
} else {
fs.Debug(src, "Chunking - unknown vendor")
}
if o.shouldUseChunkedUpload(src) {
fs.Debugf(src, "Update will use the chunked upload strategy")
err = o.updateChunked(ctx, in, src, options...)
if err != nil {
return err
}
@@ -1538,9 +1513,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// TODO: define getBody() to enable low-level HTTP/2 retries
err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
if err != nil {
return fmt.Errorf("unchunked simple update failed: %w", err)
return err
}
}
// read metadata from remote
o.hasMetaData = false
return o.readMetaData(ctx)
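
With the quirks wired up above, using Infinite Scale from rclone is a vendor choice in the config. A hypothetical remote, where the name, URL and credentials are placeholders, would look like:

[ocis]
type = webdav
url = https://ocis.example.com/remote.php/webdav
vendor = infinitescale
user = alice
pass = <obscured password>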


@@ -12,5 +12,4 @@
<seb•ɑƬ•chezwam•ɖɵʈ•org>
<allllaboutyou@gmail.com>
<psycho@feltzv.fr>
<afw5059@gmail.com>
<piyushgarg80>
<afw5059@gmail.com>


@@ -4,12 +4,12 @@ This script checks for unauthorized modifications in autogenerated sections of m
It is designed to be used in a GitHub Actions workflow or a local pre-commit hook.
Features:
- Detects markdown files changed between a commit and one of its ancestors. Default is to
check the last commit only. When triggered on a pull request it should typically compare the
pull request branch head and its merge base - the commit on the main branch before it diverged.
- Detects markdown files changed in the last commit.
- Identifies modified autogenerated sections marked by specific comments.
- Reports violations using GitHub Actions error messages.
- Exits with a nonzero status code if unauthorized changes are found.
It currently only checks the last commit.
"""
import re
@@ -22,18 +22,18 @@ def run_git(args):
"""
return subprocess.run(["git"] + args, stdout=subprocess.PIPE, text=True, check=True).stdout.strip()
def get_changed_files(base, head):
def get_changed_files():
"""
Retrieve a list of markdown files that were changed between the base and head commits.
Retrieve a list of markdown files that were changed in the last commit.
"""
files = run_git(["diff", "--name-only", f"{base}...{head}"]).splitlines()
files = run_git(["diff", "--name-only", "HEAD~1", "HEAD"]).splitlines()
return [f for f in files if f.endswith(".md")]
def get_diff(file, base, head):
def get_diff(file):
"""
Get the diff of a given file between the base and head commits.
Get the diff of a given file between the last commit and the current version.
"""
return run_git(["diff", "-U0", f"{base}...{head}", "--", file]).splitlines()
return run_git(["diff", "-U0", "HEAD~1", "HEAD", "--", file]).splitlines()
def get_file_content(ref, file):
"""
@@ -70,21 +70,17 @@ def show_error(file_name, line, message):
"""
print(f"::error file={file_name},line={line}::{message} at {file_name} line {line}")
def check_file(file, base, head):
def check_file(file):
"""
Check a markdown file for modifications in autogenerated regions.
"""
viol = False
new_lines = get_file_content("HEAD", file)
old_lines = get_file_content("HEAD~1", file)
# If old file did not exist or was empty then don't check
if not old_lines:
return
# Entire autogenerated file check.
if any("autogenerated - DO NOT EDIT" in l for l in new_lines[:10]):
if get_diff(file, base, head):
if get_diff(file):
show_error(file, 1, "Autogenerated file modified")
return True
return False
@@ -92,7 +88,7 @@ def check_file(file, base, head):
# Partial autogenerated regions.
regions_new = find_regions(new_lines)
regions_old = find_regions(old_lines)
diff = get_diff(file, base, head)
diff = get_diff(file)
hunk_re = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@")
new_ln = old_ln = None
@@ -124,15 +120,9 @@ def main():
"""
Main function that iterates over changed files and checks them for violations.
"""
base = "HEAD~1"
head = "HEAD"
if len(sys.argv) > 1:
base = sys.argv[1]
if len(sys.argv) > 2:
head = sys.argv[2]
found = False
for f in get_changed_files(base, head):
if check_file(f, base, head):
for f in get_changed_files():
if check_file(f):
found = True
if found:
sys.exit(1)
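
In a pull request workflow the checker would therefore be invoked with an explicit range, for example (the script path is illustrative):

python3 bin/check_autogenerated.py "$(git merge-base origin/master HEAD)" HEAD

With no arguments it keeps the old behaviour and compares HEAD~1 against HEAD.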


@@ -1,119 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# Create test TLS certificates for use with rclone.
OUT_DIR="${OUT_DIR:-./tls-test}"
CA_SUBJ="${CA_SUBJ:-/C=US/ST=Test/L=Test/O=Test Org/OU=Test Unit/CN=Test Root CA}"
SERVER_CN="${SERVER_CN:-localhost}"
CLIENT_CN="${CLIENT_CN:-Test Client}"
CLIENT_KEY_PASS="${CLIENT_KEY_PASS:-testpassword}"
CA_DAYS=${CA_DAYS:-3650}
SERVER_DAYS=${SERVER_DAYS:-825}
CLIENT_DAYS=${CLIENT_DAYS:-825}
mkdir -p "$OUT_DIR"
cd "$OUT_DIR"
# Create OpenSSL config
# CA extensions
cat > ca_openssl.cnf <<'EOF'
[ ca_ext ]
basicConstraints = critical, CA:true, pathlen:1
keyUsage = critical, keyCertSign, cRLSign
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer
EOF
# Server extensions (SAN includes localhost + loopback IP)
cat > server_openssl.cnf <<EOF
[ server_ext ]
basicConstraints = critical, CA:false
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = ${SERVER_CN}
IP.1 = 127.0.0.1
EOF
# Client extensions (for mTLS client auth)
cat > client_openssl.cnf <<'EOF'
[ client_ext ]
basicConstraints = critical, CA:false
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
EOF
echo "Create CA key, CSR, and self-signed CA cert"
if [ ! -f ca.key.pem ]; then
openssl genrsa -out ca.key.pem 4096
chmod 600 ca.key.pem
fi
openssl req -new -key ca.key.pem -subj "$CA_SUBJ" -out ca.csr.pem
openssl x509 -req -in ca.csr.pem -signkey ca.key.pem \
-sha256 -days "$CA_DAYS" \
-extfile ca_openssl.cnf -extensions ca_ext \
-out ca.cert.pem
echo "Create server key (NO PASSWORD) and cert signed by CA"
openssl genrsa -out server.key.pem 2048
chmod 600 server.key.pem
openssl req -new -key server.key.pem -subj "/CN=${SERVER_CN}" -out server.csr.pem
openssl x509 -req -in server.csr.pem \
-CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
-out server.cert.pem -days "$SERVER_DAYS" -sha256 \
-extfile server_openssl.cnf -extensions server_ext
echo "Create client key (PASSWORD-PROTECTED), CSR, and cert"
openssl genrsa -aes256 -passout pass:"$CLIENT_KEY_PASS" -out client.key.pem 2048
chmod 600 client.key.pem
openssl req -new -key client.key.pem -passin pass:"$CLIENT_KEY_PASS" \
-subj "/CN=${CLIENT_CN}" -out client.csr.pem
openssl x509 -req -in client.csr.pem \
-CA ca.cert.pem -CAkey ca.key.pem -CAcreateserial \
-out client.cert.pem -days "$CLIENT_DAYS" -sha256 \
-extfile client_openssl.cnf -extensions client_ext
echo "Verify chain"
openssl verify -CAfile ca.cert.pem server.cert.pem client.cert.pem
echo "Done"
echo
echo "Summary"
echo "-------"
printf "%-22s %s\n" \
"CA key:" "ca.key.pem" \
"CA cert:" "ca.cert.pem" \
"Server key:" "server.key.pem (no password)" \
"Server CSR:" "server.csr.pem" \
"Server cert:" "server.cert.pem (SAN: ${SERVER_CN}, 127.0.0.1)" \
"Client key:" "client.key.pem (encrypted)" \
"Client CSR:" "client.csr.pem" \
"Client cert:" "client.cert.pem" \
"Client key password:" "$CLIENT_KEY_PASS"
echo
echo "Test rclone server"
echo
echo "rclone serve http -vv --addr :8080 --cert ${OUT_DIR}/server.cert.pem --key ${OUT_DIR}/server.key.pem --client-ca ${OUT_DIR}/ca.cert.pem ."
echo
echo "Test rclone client"
echo
echo "rclone lsf :http: --http-url 'https://localhost:8080' --ca-cert ${OUT_DIR}/ca.cert.pem --client-cert ${OUT_DIR}/client.cert.pem --client-key ${OUT_DIR}/client.key.pem --client-pass \$(rclone obscure $CLIENT_KEY_PASS)"
echo
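
Every knob in the script above is an environment variable with a default, so a non-default run only needs overrides on the command line, for example (the script name is a placeholder):

OUT_DIR=/tmp/tls-test SERVER_CN=test.internal CLIENT_KEY_PASS=secret ./make-test-certs.sh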


@@ -1,159 +0,0 @@
//go:build ignore
package main
import (
"bytes"
"cmp"
"context"
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/runs"
"github.com/stretchr/testify/assert/yaml"
)
var path = flag.String("path", "./docs/content/", "root path")
const (
configFile = "fstest/test_all/config.yaml"
startListIgnores = "<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListIgnores = "<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
startListFailures = "<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListFailures = "<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
integrationTestsJSONURL = "https://pub.rclone.org/integration-tests/current/index.json"
integrationTestsHTMLURL = "https://pub.rclone.org/integration-tests/current/"
)
func main() {
err := replaceBetween(*path, startListIgnores, endListIgnores, getIgnores)
if err != nil {
fs.Errorf(*path, "error replacing ignores: %v", err)
}
err = replaceBetween(*path, startListFailures, endListFailures, getFailures)
if err != nil {
fs.Errorf(*path, "error replacing failures: %v", err)
}
}
// replaceBetween replaces the text between startSep and endSep with fn()
func replaceBetween(path, startSep, endSep string, fn func() (string, error)) error {
b, err := os.ReadFile(filepath.Join(path, "bisync.md"))
if err != nil {
return err
}
doc := string(b)
before, after, found := strings.Cut(doc, startSep)
if !found {
return fmt.Errorf("could not find: %v", startSep)
}
_, after, found = strings.Cut(after, endSep)
if !found {
return fmt.Errorf("could not find: %v", endSep)
}
replaceSection, err := fn()
if err != nil {
return err
}
newDoc := before + startSep + "\n" + strings.TrimSpace(replaceSection) + "\n" + endSep + after
err = os.WriteFile(filepath.Join(path, "bisync.md"), []byte(newDoc), 0777)
if err != nil {
return err
}
return nil
}
// getIgnores updates the list of ignores from config.yaml
func getIgnores() (string, error) {
config, err := parseConfig()
if err != nil {
return "", fmt.Errorf("failed to parse config: %v", err)
}
s := ""
slices.SortFunc(config.Backends, func(a, b runs.Backend) int {
return cmp.Compare(a.Remote, b.Remote)
})
for _, backend := range config.Backends {
include := false
if slices.Contains(backend.IgnoreTests, "cmd/bisync") {
include = true
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
for _, ignore := range backend.Ignore {
if strings.Contains(strings.ToLower(ignore), "bisync") {
if !include { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
include = true
s += fmt.Sprintf(" - `%s`\n", ignore)
// TODO: might be neat to add a "reason" param displaying the reason the test is ignored
}
}
}
return s, nil
}
// getFailures updates the list of currently failing tests from the integration tests server
func getFailures() (string, error) {
var buf bytes.Buffer
err := operations.CopyURLToWriter(context.Background(), integrationTestsJSONURL, &buf)
if err != nil {
return "", err
}
r := runs.Report{}
err = json.Unmarshal(buf.Bytes(), &r)
if err != nil {
return "", fmt.Errorf("failed to unmarshal json: %v", err)
}
s := ""
for _, run := range r.Failed {
for i, t := range run.FailedTests {
if strings.Contains(strings.ToLower(t), "bisync") {
if i == 0 { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(run.Remote, ":"), run.Backend)
}
url := integrationTestsHTMLURL + run.TrialName
url = url[:len(url)-5] + "1.txt" // numbers higher than 1 could change from night to night
s += fmt.Sprintf(" - [`%s`](%v)\n", t, url)
if i == 4 && len(run.FailedTests) > 5 { // stop after 5
s += fmt.Sprintf(" - [%v more](%v)\n", len(run.FailedTests)-5, integrationTestsHTMLURL)
break
}
}
}
}
s += fmt.Sprintf("- Updated: %v", r.DateTime)
return s, nil
}
// parseConfig reads and parses the config.yaml file
func parseConfig() (*runs.Config, error) {
d, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("failed to read config file: %w", err)
}
config := &runs.Config{}
err = yaml.Unmarshal(d, &config)
if err != nil {
return nil, fmt.Errorf("failed to parse config file: %w", err)
}
return config, nil
}
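
replaceBetween is generic over its markers, so maintaining another region of bisync.md only needs a new marker pair and a generator function. A hypothetical example in the style of main above (marker text and list contents are illustrative):

err := replaceBetween(*path,
	"<!--- start my_list - DO NOT EDIT THIS SECTION - use make commanddocs --->",
	"<!--- end my_list - DO NOT EDIT THIS SECTION - use make commanddocs --->",
	func() (string, error) { return "- first item\n- second item", nil })
if err != nil {
	fs.Errorf(*path, "error replacing my_list: %v", err)
}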

Some files were not shown because too many files have changed in this diff.