mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


1 Commits

567 changed files with 52245 additions and 94031 deletions

View File

@@ -29,12 +29,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.23']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -45,14 +45,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -61,14 +61,14 @@ jobs:
- job_name: mac_arm64
os: macos-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -78,14 +78,14 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '>=1.25.0-rc.1'
go: '>=1.24.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.24
- job_name: go1.23
os: ubuntu-latest
go: '1.24'
go: '1.23'
quicktest: true
racequicktest: true
@@ -95,12 +95,12 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go }}
check-latest: true
@@ -216,15 +216,15 @@ jobs:
echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
id: setup-go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: '>=1.24.0-rc.1'
go-version: '>=1.23.0-rc.1'
check-latest: true
cache: false
@@ -239,13 +239,13 @@ jobs:
restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-
- name: Code quality test (Linux)
uses: golangci/golangci-lint-action@v9
uses: golangci/golangci-lint-action@v6
with:
version: latest
skip-cache: true
- name: Code quality test (Windows)
uses: golangci/golangci-lint-action@v9
uses: golangci/golangci-lint-action@v6
env:
GOOS: "windows"
with:
@@ -253,7 +253,7 @@ jobs:
skip-cache: true
- name: Code quality test (macOS)
uses: golangci/golangci-lint-action@v9
uses: golangci/golangci-lint-action@v6
env:
GOOS: "darwin"
with:
@@ -261,7 +261,7 @@ jobs:
skip-cache: true
- name: Code quality test (FreeBSD)
uses: golangci/golangci-lint-action@v9
uses: golangci/golangci-lint-action@v6
env:
GOOS: "freebsd"
with:
@@ -269,7 +269,7 @@ jobs:
skip-cache: true
- name: Code quality test (OpenBSD)
uses: golangci/golangci-lint-action@v9
uses: golangci/golangci-lint-action@v6
env:
GOOS: "openbsd"
with:
@@ -282,21 +282,8 @@ jobs:
- name: Scan for vulnerabilities
run: govulncheck ./...
- name: Check Markdown format
uses: DavidAnson/markdownlint-cli2-action@v20
with:
globs: |
CONTRIBUTING.md
MAINTAINERS.md
README.md
RELEASE.md
CODE_OF_CONDUCT.md
librclone\README.md
backend\s3\README.md
docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
run: bin/check_autogenerated_edits.py
if: github.event_name == 'pull_request'
android:
@@ -307,15 +294,15 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v6
uses: actions/setup-go@v5
with:
go-version: '>=1.25.0-rc.1'
go-version: '>=1.24.0-rc.1'
- name: Set global environment variables
run: |

View File

@@ -52,7 +52,7 @@ jobs:
df -h .
- name: Checkout Repository
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -92,7 +92,7 @@ jobs:
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
result-encoding: string
script: |
@@ -183,7 +183,7 @@ jobs:
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
@@ -198,7 +198,7 @@ jobs:
steps:
- name: Download Image Digests
uses: actions/download-artifact@v6
uses: actions/download-artifact@v4
with:
path: /tmp/digests
pattern: digests-*

View File

@@ -30,7 +30,7 @@ jobs:
sudo rm -rf /usr/share/dotnet || true
df -h .
- name: Checkout master
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build and publish docker plugin

View File

@@ -1,151 +1,144 @@
version: "2"
# golangci-lint configuration options
linters:
# Configure the linter set. To avoid unexpected results the implicit default
# set is ignored and all the ones to use are explicitly enabled.
default: none
enable:
# Default
- errcheck
- govet
- ineffassign
- staticcheck
- unused
# Additional
- gocritic
- misspell
#- prealloc # TODO
- revive
- unconvert
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
govet:
enable-all: true
disable:
- fieldalignment
- shadow
staticcheck:
# With staticcheck there is only one setting, so to extend the implicit
# default value it must be explicitly included.
checks:
# Default
- all
- -ST1000
- -ST1003
- -ST1016
- -ST1020
- -ST1021
- -ST1022
# Disable quickfix checks
- -QF*
gocritic:
# With gocritic there are different settings, but since enabled-checks
# and disabled-checks cannot both be set, for full customization the
# alternative is to disable all defaults and explicitly enable the ones
# to use.
disable-all: true
enabled-checks:
#- appendAssign # Skip default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Skip default
- caseOrder
- codegenComment
#- commentFormatting # Skip default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Skip default
- flagDeref
- flagName
#- ifElseChain # Skip default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Enable additional checks that are not enabled by default
#- singleCaseSwitch # Skip default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: ${base-path}/bin/rules.go
revive:
# With revive there is in reality only one setting, and when at least one
# rule is specified then only those rules will be considered; defaults
# and all others are implicitly disabled, so all rules to be used must be
# explicitly enabled.
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
#- name: empty-block # Skip default
# disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
#- name: increment-decrement # Skip default
# disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
#- name: redefines-builtin-id # Skip default
# disabled: true
#- name: superfluous-else # Skip default
# disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
#- name: unreachable-code # Skip default
# disabled: true
#- name: unused-parameter # Skip default
# disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
formatters:
enable:
- goimports
- revive
- ineffassign
- govet
- unconvert
- staticcheck
- gosimple
- stylecheck
- unused
- misspell
- gocritic
#- prealloc
#- maligned
disable-all: true
issues:
# Enable some lints excluded by default
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0
exclude-rules:
- linters:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# Timeout for total work, e.g. 30s, 5m, 5m30s. Default is 0 (disabled).
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
gocritic:
# Enable all default checks with some exceptions and some additions (commented).
# Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
disable-all: true
enabled-checks:
#- appendAssign # Enabled by default
- argOrder
- assignOp
- badCall
- badCond
#- captLocal # Enabled by default
- caseOrder
- codegenComment
#- commentFormatting # Enabled by default
- defaultCaseOrder
- deprecatedComment
- dupArg
- dupBranchBody
- dupCase
- dupSubExpr
- elseif
#- exitAfterDefer # Enabled by default
- flagDeref
- flagName
#- ifElseChain # Enabled by default
- mapKey
- newDeref
- offBy1
- regexpMust
- ruleguard # Not enabled by default
#- singleCaseSwitch # Enabled by default
- sloppyLen
- sloppyTypeAssert
- switchTrue
- typeSwitchVar
- underef
- unlambda
- unslice
- valSwap
- wrapperFunc
settings:
ruleguard:
rules: "${configDir}/bin/rules.go"

View File

@@ -1,72 +0,0 @@
default: true
# Use specific styles, to be consistent across all documents.
# Default is to accept any as long as it is consistent within the same document.
heading-style: # MD003
style: atx
ul-style: # MD004
style: dash
hr-style: # MD035
style: ---
code-block-style: # MD046
style: fenced
code-fence-style: # MD048
style: backtick
emphasis-style: # MD049
style: asterisk
strong-style: # MD050
style: asterisk
# Allow multiple headers with the same text as long as they are not siblings.
no-duplicate-heading: # MD024
siblings_only: true
# Allow long lines in code blocks and tables.
line-length: # MD013
code_blocks: false
tables: false
# The Markdown files used to generate docs with Hugo contain a top level
# header, even though the YAML front matter has a title property (which is
# used for the HTML document title only). Suppress Markdownlint warning:
# Multiple top-level headings in the same document.
single-title: # MD025
level: 1
front_matter_title:
# The HTML docs generated by Hugo from Markdown files may have slightly
# different header anchors than GitHub rendered Markdown, e.g. Hugo trims
# leading dashes so "--config string" becomes "#config-string" while it is
# "#--config-string" in GitHub preview. When writing links to headers in the
# Markdown files we must use whatever works in the final HTML generated docs.
# Suppress Markdownlint warning: Link fragments should be valid.
link-fragments: false # MD051
# Restrict the languages and language identifiers to use for code blocks.
# We only want those supported by both Hugo and GitHub. These are documented
# here:
# https://gohugo.io/content-management/syntax-highlighting/#languages
# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
# In addition, we only want to allow identifiers (aliases) that correspond to
# the same language in Hugo and GitHub, and preferably also VSCode and other
# commonly used tools, to avoid confusion. An example of this is that "shell"
# is by some considered an identifier for shell scripts, i.e. an alias for
# "sh", while others consider it an identifier for shell sessions, i.e. an
# alias for "console". Although Hugo and GitHub in this case are consistent and
# have chosen the former, using "sh" instead, and not allowing use of "shell",
# avoids the confusion entirely.
fenced-code-language: # MD040
allowed_languages:
- text
- console
- sh
- bat
- ini
- json
- yaml
- go
- python
- c++
- c#
- java
- powershell
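The build workflow earlier in this diff checks Markdown formatting with markdownlint-cli2 against a fixed list of files. A minimal local sketch, assuming markdownlint-cli2 is installed from npm and discovers this configuration file automatically (the globs are abbreviated; see the workflow for the full list):

```console
npm install -g markdownlint-cli2
markdownlint-cli2 CONTRIBUTING.md MAINTAINERS.md README.md "docs/content/*.md"
```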

View File

@@ -1,80 +0,0 @@
# Rclone Code of Conduct
Like the technical community as a whole, the Rclone team and community
is made up of a mixture of professionals and volunteers from all over
the world, working on every aspect of the mission - including
mentorship, teaching, and connecting people.
Diversity is one of our huge strengths, but it can also lead to
communication issues and unhappiness. To that end, we have a few
ground rules that we ask people to adhere to. This code applies
equally to founders, mentors and those seeking help and guidance.
This isn't an exhaustive list of things that you can't do. Rather,
take it in the spirit in which it's intended - a guide to make it
easier to enrich all of us and the technical communities in which we
participate.
This code of conduct applies to all spaces managed by the Rclone
project or Rclone Services Ltd. This includes the issue tracker, the
forum, the GitHub site, the wiki, any other online services or
in-person events. In addition, violations of this code outside these
spaces may affect a person's ability to participate within them.
- **Be friendly and patient.**
- **Be welcoming.** We strive to be a community that welcomes and
supports people of all backgrounds and identities. This includes,
but is not limited to members of any race, ethnicity, culture,
national origin, colour, immigration status, social and economic
class, educational level, sex, sexual orientation, gender identity
and expression, age, size, family status, political belief,
religion, and mental and physical ability.
- **Be considerate.** Your work will be used by other people, and you
in turn will depend on the work of others. Any decision you take
will affect users and colleagues, and you should take those
consequences into account when making decisions. Remember that we're
a world-wide community, so you might not be communicating in someone
else's primary language.
- **Be respectful.** Not all of us will agree all the time, but
disagreement is no excuse for poor behavior and poor manners. We
might all experience some frustration now and then, but we cannot
allow that frustration to turn into a personal attack. It's
important to remember that a community where people feel
uncomfortable or threatened is not a productive one. Members of the
Rclone community should be respectful when dealing with other
members as well as with people outside the Rclone community.
- **Be careful in the words that you choose.** We are a community of
professionals, and we conduct ourselves professionally. Be kind to
others. Do not insult or put down other participants. Harassment and
other exclusionary behavior aren't acceptable. This includes, but is
not limited to:
- Violent threats or language directed against another person.
- Discriminatory jokes and language.
- Posting sexually explicit or violent material.
- Posting (or threatening to post) other people's personally
identifying information ("doxing").
- Personal insults, especially those using racist or sexist terms.
- Unwelcome sexual attention.
- Advocating for, or encouraging, any of the above behavior.
- Repeated harassment of others. In general, if someone asks you to
stop, then stop.
- **When we disagree, try to understand why.** Disagreements, both
social and technical, happen all the time and Rclone is no
exception. It is important that we resolve disagreements and
differing views constructively. Remember that we're different. The
strength of Rclone comes from its varied community, people from a
wide range of backgrounds. Different people have different
perspectives on issues. Being unable to understand why someone holds
a viewpoint doesn't mean that they're wrong. Don't forget that it is
human to err and blaming each other doesn't get us anywhere.
Instead, focus on helping to resolve issues and learning from
mistakes.
If you believe someone is violating the code of conduct, we ask that
you report it by emailing [info@rclone.com](mailto:info@rclone.com).
Original text courtesy of the [Speak Up! project](http://web.archive.org/web/20141109123859/http://speakup.io/coc.html).
## Questions?
If you have questions, please feel free to [contact us](mailto:info@rclone.com).

View File

@@ -15,81 +15,61 @@ with the [latest beta of rclone](https://beta.rclone.org/):
- Rclone version (e.g. output from `rclone version`)
- Which OS you are using and how many bits (e.g. Windows 10, 64 bit)
- The command you were trying to run (e.g. `rclone copy /tmp remote:tmp`)
- A log of the command with the `-vv` flag (e.g. output from
`rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to
obscure them
- A log of the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
- if the log contains secrets then edit the file with a text editor first to obscure them
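A minimal sketch of capturing such a log to a file so it can be reviewed and redacted before attaching; the file name is only an example, and `--log-file` is the rclone flag that writes the log to a file:

```console
rclone -vv copy /tmp remote:tmp --log-file rclone.log
# open rclone.log in a text editor and obscure any tokens, passwords or private paths before sharing
```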
## Submitting a new feature or bug fix
If you find a bug that you'd like to fix, or a new feature that you'd
like to implement then please submit a pull request via GitHub.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues)
first so it can be discussed.
If it is a big feature, then [make an issue](https://github.com/rclone/rclone/issues) first so it can be discussed.
To prepare your pull request first press the fork button on [rclone's GitHub
page](https://github.com/rclone/rclone).
Then [install Git](https://git-scm.com/downloads) and set your public contribution
[name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git)
and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Then [install Git](https://git-scm.com/downloads) and set your public contribution [name](https://docs.github.com/en/github/getting-started-with-github/setting-your-username-in-git) and [email](https://docs.github.com/en/github/setting-up-and-managing-your-github-user-account/setting-your-commit-email-address#setting-your-commit-email-address-in-git).
Next open your terminal, change directory to your preferred folder and initialise
your local rclone project:
Next open your terminal, change directory to your preferred folder and initialise your local rclone project:
```console
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys set up in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
```
git clone https://github.com/rclone/rclone.git
cd rclone
git remote rename origin upstream
# if you have SSH keys set up in your GitHub account:
git remote add origin git@github.com:YOURUSER/rclone.git
# otherwise:
git remote add origin https://github.com/YOURUSER/rclone.git
Note that most of the terminal commands in the rest of this guide must be
executed from the rclone folder created above.
Note that most of the terminal commands in the rest of this guide must be executed from the rclone folder created above.
Now [install Go](https://golang.org/doc/install) and verify your installation:
```console
go version
```
go version
Great, you can now compile and execute your own version of rclone:
```console
go build
./rclone version
```
go build
./rclone version
(Note that you can also replace `go build` with `make`, which will include a
more accurate version number in the executable as well as enable you to specify
more build options.) Finally make a branch to add your new feature
```console
git checkout -b my-new-feature
```
git checkout -b my-new-feature
And get hacking.
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins)
and a quick view on the rclone [code organisation](#code-organisation).
You may like one of the [popular editors/IDE's for Go](https://github.com/golang/go/wiki/IDEsAndTextEditorPlugins) and a quick view on the rclone [code organisation](#code-organisation).
When ready - test the affected functionality and run the unit tests for the
code you changed
When ready - test the affected functionality and run the unit tests for the code you changed
```console
cd folder/with/changed/files
go test -v
```
cd folder/with/changed/files
go test -v
Note that you may need to make a test remote, e.g. `TestSwift` for some
of the unit tests.
This is typically enough if you made a simple bug fix, otherwise please read
the rclone [testing](#testing) section too.
This is typically enough if you made a simple bug fix, otherwise please read the rclone [testing](#testing) section too.
Make sure you
@@ -99,19 +79,14 @@ Make sure you
When you are done with that push your changes to GitHub:
```console
git push -u origin my-new-feature
```
git push -u origin my-new-feature
and open the GitHub website to [create your pull
request](https://help.github.com/articles/creating-a-pull-request/).
Your changes will then get reviewed and you might get asked to fix some stuff.
If so, then make the changes in the same branch, commit and push your updates to
GitHub.
Your changes will then get reviewed and you might get asked to fix some stuff. If so, then make the changes in the same branch, commit and push your updates to GitHub.
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master)
or [squash your commits](#squashing-your-commits).
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and GitHub
@@ -119,118 +94,87 @@ or [squash your commits](#squashing-your-commits).
Follow the guideline for [commit messages](#commit-messages) and then:
```console
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
```
git checkout my-new-feature # To switch to your branch
git status # To see the new and changed files
git add FILENAME # To select FILENAME for the commit
git status # To verify the changes to be committed
git commit # To do the commit
git log # To verify the commit. Use q to quit the log
You can modify the message or changes in the latest commit using:
```console
git commit --amend
```
git commit --amend
If you amend to commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you amend to commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Replacing your previously pushed commits
Note that you are about to rewrite the GitHub history of your branch. It is good
practice to involve your collaborators before modifying commits that have been
pushed to GitHub.
Note that you are about to rewrite the GitHub history of your branch. It is good practice to involve your collaborators before modifying commits that have been pushed to GitHub.
Your previously pushed commits are replaced by:
```console
git push --force origin my-new-feature
```
git push --force origin my-new-feature
### Basing your changes on the latest master
To base your changes on the latest version of the
[rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
To base your changes on the latest version of the [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
```console
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
```
git checkout master
git fetch upstream
git merge --ff-only
git push origin --follow-tags # optional update of your fork in GitHub
git checkout my-new-feature
git rebase master
If you rebase commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you rebase commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
### Squashing your commits
### Squashing your commits ###
To combine your commits into one commit:
```console
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
```
git log # To count the commits to squash, e.g. the last 2
git reset --soft HEAD~2 # To undo the 2 latest commits
git status # To check everything is as expected
If everything is fine, then make the new combined commit:
```console
git commit # To commit the undone commits as one
```
git commit # To commit the undone commits as one
otherwise, you may roll back using:
```console
git reflog # To check that HEAD@{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
```
git reflog # To check that HEAD@{1} is your previous state
git reset --soft 'HEAD@{1}' # To roll back to your previous state
If you squash commits that have been pushed to GitHub, then you will have to
[replace your previously pushed commits](#replacing-your-previously-pushed-commits).
If you squash commits that have been pushed to GitHub, then you will have to [replace your previously pushed commits](#replacing-your-previously-pushed-commits).
Tip: You may like to use `git rebase -i master` if you are experienced or have a
more complex situation.
Tip: You may like to use `git rebase -i master` if you are experienced or have a more complex situation.
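For example, a sketch of the interactive route (the editor step is manual):

```console
git rebase -i master
# in the editor that opens, keep "pick" on the first commit and change the
# rest to "squash" or "fixup", then save and quit
git push --force origin my-new-feature   # replace the previously pushed commits
```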
### GitHub Continuous Integration
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions)
to build and test the project, which should be automatically available for your
fork too from the `Actions` tab in your repository.
rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
## Testing
### Code quality tests
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then
you can run the same tests that are run in the CI, which can be very helpful.
If you install [golangci-lint](https://github.com/golangci/golangci-lint) then you can run the same tests that are run in the CI, which can be very helpful.
You can run them with `make check` or with `golangci-lint run ./...`.
Using these tests ensures that all of the rclone codebase uses the same coding
standards. These tests also check for easy-to-make mistakes (like forgetting
to check an error return).
Using these tests ensures that all of the rclone codebase uses the same coding standards. These tests also check for easy-to-make mistakes (like forgetting to check an error return).
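A minimal sketch of getting the linter and running the same checks locally; the install command is an assumption, so check the golangci-lint documentation or the CI workflow for the exact version and install method used:

```console
# one way to install golangci-lint (the module path may differ for newer releases)
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
# run the full lint suite from the repository root
golangci-lint run ./...
# or via the Makefile
make check
```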
### Quick testing
rclone's tests are run from the go testing framework, so at the top
level you can run this to run all the tests.
```console
go test -v ./...
```
go test -v ./...
You can also use `make`, if supported by your platform
```console
make quicktest
```
make quicktest
The quicktest is [automatically run by GitHub](#github-continuous-integration)
when you push your branch to GitHub.
The quicktest is [automatically run by GitHub](#github-continuous-integration) when you push your branch to GitHub.
### Backend testing
@@ -246,50 +190,41 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.
```console
cd backend/drive
go test -v
```
cd backend/drive
go test -v
You can then run the integration tests which test all of rclone's
operations. Normally these get run against the local file system,
but they can be run against any of the remotes.
```console
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
cd fs/sync
go test -v -remote TestDrive:
go test -v -remote TestDrive: -fast-list
cd fs/operations
go test -v -remote TestDrive:
```
cd fs/operations
go test -v -remote TestDrive:
If you want to use the integration test framework to run these tests
all together with an HTML report and test retries then from the
project root:
```console
go run ./fstest/test_all -backends drive
```
go install github.com/rclone/rclone/fstest/test_all
test_all -backends drive
### Full integration testing
If you want to run all the integration tests against all the remotes,
then change into the project root and run
```console
make check
make test
```
make check
make test
The commands may require some extra go packages which you can install with
```console
make build_dep
```
make build_dep
The full integration tests are run daily on the integration test server. You can
find the results at <https://integration.rclone.org>
find the results at https://pub.rclone.org/integration-tests/
## Code Organisation
@@ -297,48 +232,46 @@ Rclone code is organised into a small number of top level directories
with modules beneath.
- backend - the rclone backends for interfacing to cloud providers -
- all - import this to load all the cloud providers
- ...providers
- all - import this to load all the cloud providers
- ...providers
- bin - scripts for use while building or maintaining rclone
- cmd - the rclone commands
- all - import this to load all the commands
- ...commands
- all - import this to load all the commands
- ...commands
- cmdtest - end-to-end tests of commands, flags, environment variables,...
- docs - the documentation and website
- content - adjust these docs only, except for files or portions marked
autogenerated, where the corresponding .go file must be edited instead;
everything else is autogenerated
- commands - these are auto-generated, edit the corresponding .go file
- content - adjust these docs only - everything else is autogenerated
- command - these are auto-generated - edit the corresponding .go file
- fs - main rclone definitions - minimal amount of code
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- accounting - bandwidth limiting and statistics
- asyncreader - an io.Reader which reads ahead
- config - manage the config file and flags
- driveletter - detect if a name is a drive letter
- filter - implements include/exclude filtering
- fserrors - rclone specific error handling
- fshttp - http handling for rclone
- fspath - path handling for rclone
- hash - defines rclone's hash types and functions
- list - list a remote
- log - logging facilities
- march - iterates directories in lock step
- object - in memory Fs objects
- operations - primitives for sync, e.g. Copy, Move
- sync - sync directories
- walk - walk a directory
- fstest - provides integration test framework
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- fstests - integration tests for the backends
- mockdir - mocks an fs.Directory
- mockobject - mocks an fs.Object
- test_all - Runs integration tests for everything
- graphics - the images used in the website, etc.
- lib - libraries used by the backend
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- atexit - register functions to run when rclone exits
- dircache - directory ID to name caching
- oauthutil - helpers for using oauth
- pacer - retries with backoff and paces operations
- readers - a selection of useful io.Readers
- rest - a thin abstraction over net/http for REST
- librclone - in memory interface to rclone's API for embedding rclone
- vfs - Virtual FileSystem layer for implementing rclone mount and similar
@@ -346,109 +279,47 @@ with modules beneath.
If you are adding a new feature then please update the documentation.
The documentation sources are generally in Markdown format, in conformance
with the CommonMark specification and compatible with GitHub Flavored
Markdown (GFM). The markdown format and style is checked as part of the lint
operation that runs automatically on pull requests, to enforce standards and
consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
tool by David Anson, which can also be integrated into editors so you can
perform the same checks while writing. It generally follows Ciro Santilli's
[Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which
is a good source if you want to know more.
HTML pages, served as the website <rclone.org>, are generated from the Markdown,
using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
a different algorithm is currently used for generating header anchors
than what GitHub uses for its Markdown rendering. For example, in the HTML docs
generated by Hugo any leading `-` characters are ignored, which means when
linking to a header with text `--config string` we therefore need to use the
link `#config-string` in our Markdown source, which will not work in GitHub's
preview where `#--config-string` would be the correct link.
Most of the documentation is written directly in text files with extension
`.md`, mainly within the folder `docs/content`. Note that several such files
are autogenerated (e.g. the command documentation, and `docs/content/flags.md`),
or contain autogenerated portions (e.g. the backend documentation under
`docs/content/commands`). These are marked with an `autogenerated` comment.
The sources of the autogenerated text are usually Markdown formatted text
embedded as string values in the Go source code, so you need to locate these
and edit the `.go` file instead. The `MANUAL.*`, `rclone.1` and other text
files in the root of the repository are also autogenerated. The autogeneration
of files, and the website, will be done during the release process. See the
`make doc` and `make website` targets in the Makefile if you are interested in
how. You don't need to run these when adding a feature.
If you add a new general flag (not for a backend), then document it in
`docs/content/docs.md` - the flags there are supposed to be in
alphabetical order.
If you add a new backend option/flag, then it should be documented in
the source file in the `Help:` field:
the source file in the `Help:` field.
- Start with the most important information about the option,
as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
as a single sentence on a single line.
- This text will be used for the command-line flag help.
- It will be combined with other information, such as any default value,
and the result will look odd if not written as a single sentence.
- It should end with a period/full stop character, which will be shown
in docs but automatically removed when producing the flag help.
- Try to keep it below 80 characters, to reduce text wrapping in the terminal.
- More details can be added in a new paragraph, after an empty line (`"\n\n"`).
- Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
- This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
- Like with docs generated from Markdown, a single line break is ignored
and two line breaks creates a new paragraph.
- This text will be shown to the user in `rclone config`
and in the docs (where it will be added by `make backenddocs`,
normally run some time before next release).
- To create options of enumeration type use the `Examples:` field.
- Each example value has its own `Help:` field, but these are treated
a bit differently than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of
countries, it looks better without an ending period/full stop character.
- You can run `make backenddocs` to verify the resulting Markdown.
- This will update the autogenerated sections of the backend docs Markdown
files under `docs/content`.
- It requires you to have [Python](https://www.python.org) installed.
- The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
and you can also run this directly, optionally with the name of a backend
as argument to only update the docs for a specific backend.
- **Do not** commit the updated Markdown files. This operation is run as part of
the release process. Since any manual changes in the autogenerated sections
of the Markdown files will then be lost, we have a pull request check that
reports error for any changes within the autogenerated sections. Should you
have done manual changes outside of the autogenerated sections they must be
committed, of course.
- You can run `make serve` to verify the resulting website.
- This will build the website and serve it locally, so you can open it in
your web browser and verify that the end result looks OK. Check specifically
any added links, also in light of the note above regarding different algorithms
for generated header anchors.
- It requires you to have the [Hugo](https://gohugo.io) tool available.
- The `serve` make target depends on the `website` target, which runs the
`hugo` command from the `docs` directory to build the website, and then
it serves the website locally with an embedded web server using a command
`hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you
can run similar Hugo commands directly as well.
- Each example value has its own `Help:` field, but these are treated
a bit differently than the main option help text. They will be shown
as an unordered list, therefore a single line break is enough to
create a new list item. Also, for enumeration texts like name of
countries, it looks better without an ending period/full stop character.
When writing documentation for an entirely new backend,
see [backend documentation](#backend-documentation).
The only documentation you need to edit is the `docs/content/*.md`
files. The `MANUAL.*`, `rclone.1`, website, etc. are all auto-generated
from those during the release process. See the `make doc` and `make
website` targets in the Makefile if you are interested in how. You
don't need to run these when adding a feature.
If you are updating documentation for a command, you must do that in the
command source code, e.g. `cmd/ls/ls.go`. Write flag help strings as a single
sentence on a single line, without a period/full stop character at the end,
as it will be combined unmodified with other information (such as any default
value).
Documentation for rclone sub commands is with their code, e.g.
`cmd/ls/ls.go`. Write flag help strings as a single sentence on a single
line, without a period/full stop character at the end, as it will be
combined unmodified with other information (such as any default value).
Note that you can use
[GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy. Just remember the
caveat when linking to header anchors, noted above, which means that GitHub's
Markdown preview may not be an entirely reliable verification of the results.
After your changes have been merged, you can verify them on
[tip.rclone.org](https://tip.rclone.org). This site is updated daily with the
current state of the master branch at 07:00 UTC. The changes will be on the main
[rclone.org](https://rclone.org) site once they have been included in a release.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
## Making a release
@@ -479,13 +350,13 @@ change will get linked into the issue.
Here is an example of a short commit message:
```text
```
drive: add team drive support - fixes #885
```
And here is an example of a longer one:
```text
```
mount: fix hang on errored upload
In certain circumstances, if an upload failed then the mount could hang
@@ -508,9 +379,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
instructions below. These will fetch the dependency and add it to
`go.mod` and `go.sum`.
```console
go get github.com/ncw/new_dependency
```
go get github.com/ncw/new_dependency
You can add constraints on that package when doing `go get` (see the
go docs linked above), but don't unless you really need to.
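A sketch of the full sequence for the hypothetical dependency above; `go mod tidy` is not mentioned in this guide but is the usual way to keep `go.mod` and `go.sum` consistent before committing:

```console
go get github.com/ncw/new_dependency
go mod tidy    # make sure go.mod and go.sum are consistent
git add go.mod go.sum
git commit     # include go.mod and go.sum in the same commit as your other changes
```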
@@ -522,9 +391,7 @@ and `go.sum` in the same commit as your other changes.
If you need to update a dependency then run
```console
go get golang.org/x/crypto
```
go get golang.org/x/crypto
Check in a single commit as above.
@@ -567,38 +434,25 @@ remote or an fs.
### Getting going
- Create `backend/remote/remote.go` (copy this from a similar remote)
- box is a good one to start from if you have a directory-based remote (and
shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- box is a good one to start from if you have a directory-based remote (and shows how to use the directory cache)
- b2 is a good one to start from if you have a bucket-based remote
- Add your remote to the imports in `backend/all/all.go`
- HTTP based remotes are easiest to maintain if they use rclone's
[lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but
if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote
more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to
make sure we can encode any path name and `rclone info` to help determine the
encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
- HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good Go SDK from the provider then use that instead.
- Try to implement as many optional methods as possible as it makes the remote more usable.
- Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
- `rclone purge -v TestRemote:rclone-info`
- `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
- `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
- open `remote.csv` in a spreadsheet and examine
### Guidelines for a speedy merge
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest)
if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp)
if your backend is HTTP based - this adds features like `--dump bodies`,
`--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function
names, layout, structure. **Don't** move stuff around and **Don't** delete the
comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few
backends like that - don't follow them!)
- **Do** use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend.
- **Do** use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) if your backend is HTTP based - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
- **Do** follow your example backend exactly - use the same code order, function names, layout, structure. **Don't** move stuff around and **Don't** delete the comments.
- **Do not** split your backend up into `fs.go` and `object.go` (there are a few backends like that - don't follow them!)
- **Do** put your API type definitions in a separate file - by preference `api/types.go`
- **Remember** we have >50 backends to maintain so keeping them as similar as
possible to each other is a high priority!
- **Remember** we have >50 backends to maintain so keeping them as similar as possible to each other is a high priority!
### Unit tests
@@ -609,19 +463,19 @@ remote or an fs.
### Integration tests
- Add your backend to `fstest/test_all/config.yaml`
- Once you've done that then you can use the integration test framework from
the project root:
- `go run ./fstest/test_all -backends remote`
- Once you've done that then you can use the integration test framework from the project root:
- go install ./...
- test_all -backends remote
Or if you want to run the integration tests manually:
- Make sure integration tests pass with
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- `cd fs/operations`
- `go test -v -remote TestRemote:`
- `cd fs/sync`
- `go test -v -remote TestRemote:`
- If your remote defines `ListR` check with this also
- `go test -v -remote TestRemote: -fast-list`
- `go test -v -remote TestRemote: -fast-list`
See the [testing](#testing) section for more information on integration tests.
@@ -633,13 +487,10 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
`Google Drive`) but with the local file system last.
- `README.md` - main GitHub page
- `docs/content/remote.md` - main docs page (note the backend options are
automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your
reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features
table and the Optional Features table.
- `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
- make sure this has the `autogenerated options` comments in (see your reference backend docs)
- update them in your backend with `bin/make_backend_docs.py remote`
- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
- `docs/content/docs.md` - list of remotes in config section
- `docs/content/_index.md` - front page of rclone.org
- `docs/layouts/chrome/navbar.html` - add it to the website navigation
@@ -650,43 +501,78 @@ in the web browser and the links (internal and external) all work.
## Adding a new s3 provider
[Please see the guide in the S3 backend directory](backend/s3/README.md).
It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in github
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
Once you've written the code, test `rclone config` works to your
satisfaction, and check the integration tests work `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non AWS providers.
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
## Writing a plugin
New features (backends, commands) can also be added "out-of-tree", through Go
plugins. Changes will be kept in a dynamically loaded file instead of being
compiled into the main binary. This is useful if you can't merge your changes
upstream or don't want to maintain a fork of rclone.
New features (backends, commands) can also be added "out-of-tree", through Go plugins.
Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
### Usage
- Naming
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source
of rclone)
- Naming
- Plugin names must have the pattern `librcloneplugin_KIND_NAME.so`.
- `KIND` should be one of `backend`, `command` or `bundle`.
- Example: A plugin with backend support for PiFS would be called
`librcloneplugin_backend_pifs.so`.
- Loading
- Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
- Supported on rclone v1.50 or greater.
- All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
- If this variable doesn't exist, plugin support is disabled.
- Plugins must be compiled against the exact version of rclone to work.
(The rclone used during building the plugin must be the same as the source of rclone)
### Building
To turn your existing additions into a Go plugin, move them to an external repository
and change the top-level package name to `main`.
Check `rclone --version` and make sure that the plugin's rclone dependency and
host Go version match.
Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
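Putting the naming, loading and building rules together, a rough end-to-end sketch (PiFS is the hypothetical backend from the naming example above):

```console
# in the plugin's own repository, with the top-level package renamed to main
go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
# make the plugin visible to rclone; the new backend should then appear in rclone config
export RCLONE_PLUGIN_PATH="$PWD"
rclone config
```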
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
@@ -699,6 +585,6 @@ add them out of tree.
This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.
This is explained further in <https://github.com/rclone/rclone_out_of_tree_example>
This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).

View File

@@ -1,4 +1,4 @@
# Maintainers guide for rclone
# Maintainers guide for rclone #
Current active maintainers of rclone are:
@@ -24,108 +24,80 @@ Current active maintainers of rclone are:
| Dan McArdle | @dmcardle | gitannex |
| Sam Harrison | @childish-sambino | filescom |
## This is a work in progress draft
**This is a work in progress Draft**
This is a guide for how to be an rclone maintainer. This is mostly a write-up
of what I (@ncw) attempt to do.
This is a guide for how to be an rclone maintainer. This is mostly a write-up of what I (@ncw) attempt to do.
## Triaging Tickets
## Triaging Tickets ##
When a ticket comes in it should be triaged. This means it should be classified
by adding labels and placed into a milestone. Quite a lot of tickets need a bit
of back and forth to determine whether it is a valid ticket so tickets may
remain without labels or milestone for a while.
When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket so tickets may remain without labels or milestone for a while.
Rclone uses the labels like this:
- `bug` - a definitely verified bug
- `can't reproduce` - a problem which we can't reproduce
- `doc fix` - a bug in the documentation - if users need help understanding the
docs add this label
- `duplicate` - normally close these and ask the user to subscribe to the original
- `enhancement: new remote` - a new rclone backend
- `enhancement` - a new feature
- `FUSE` - to do with `rclone mount` command
- `good first issue` - mark these if you find a small self-contained issue -
these get shown to new visitors to the project
- `help wanted` - mark these if you find a self-contained issue - these get
shown to new visitors to the project
- `IMPORTANT` - note to maintainers not to forget to fix this for the release
- `maintenance` - internal enhancement, code re-organisation, etc.
- `Needs Go 1.XX` - waiting for that version of Go to be released
- `question` - not a `bug` or `enhancement` - direct to the forum for next time
- `Remote: XXX` - which rclone backend this affects
- `thinking` - not decided on the course of action yet
If it turns out to be a bug or an enhancement it should be tagged as such, with
the appropriate other tags. Don't forget the "good first issue" tag to give new
contributors something easy to do to get going.
When a ticket is tagged it should be added to a milestone, either the next
release, the one after, Soon or Help Wanted. Bugs can be added to the
"Known Bugs" milestone if they aren't planned to be fixed or need to wait for
something (e.g. the next go release).
The milestones have these meanings:
- v1.XX - stuff we would like to fit into this release
- v1.XX+1 - stuff we are leaving until the next release
- Soon - stuff we think is a good idea - waiting to be scheduled for a release
- Help wanted - blue sky stuff that might get moved up, or someone could help with
- Known bugs - bugs waiting on external factors or we aren't going to fix for
the moment
Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile)
are good candidates for ones that have slipped between the gaps and need
following up.
## Closing Tickets
Close tickets as soon as you can - make sure they are tagged with a release.
Post a link to a beta in the ticket with the fix in, asking for feedback.
## Pull requests
Try to process pull requests promptly!
Merging pull requests on GitHub itself works quite well nowadays so you can
squash and rebase or rebase pull requests. rclone doesn't use merge commits.
Use the squash and rebase option if you need to edit the commit message.
After merging the commit, in your local master branch, do `git pull` then run
`bin/update-authors.py` to update the authors file then `git push`.
Sometimes pull requests need to be left open for a while - this is especially true
of contributions of new backends which take a long time to get right.
## Merges
If you are merging a branch locally then do `git merge --ff-only branch-name` to
avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.
## Release cycle
Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer
if there is something big to merge that didn't stabilize properly or for personal
reasons.
High impact regressions should be fixed before the next release.
Near the start of the release cycle, the dependencies should be updated with
`make update` to give time for bugs to surface.
Towards the end of the release cycle try not to merge anything too big, to let
things settle down.
Follow the instructions in RELEASE.md for making the release. Note that the
testing part is the most time-consuming, often needing several rounds of test
and fix depending on exactly how many new features rclone has gained.
## Mailing list
There is now an invite-only mailing list for rclone developers `rclone-dev` on
Google Groups.
## TODO
I should probably make a <dev@rclone.org> to register with cloud providers.

check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@golangci-lint run $(LINTTAGS) ./...
@bin/markdown-lint
@echo "-- END CODE QUALITY REPORT ---------------------------------"
# Get the build dependencies
# Update dependencies
showupdates:
@echo "*** Direct dependencies that could be updated ***"
@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
# Update direct dependencies only
updatedirect:
go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
go mod tidy
# Update direct and indirect dependencies and test dependencies
update:
go get -u -t ./...
go mod tidy
# Tidy the module dependencies
tidy:
go mod tidy
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
go generate ./lib/transform
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
go run bin/make_bisync_docs.go ./docs/content/
backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'
fetch_binaries:
rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
serve: website
cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache
tag: retag doc
bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new

<!-- markdownlint-disable-next-line first-line-heading no-inline-html -->
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
<!-- markdownlint-disable-next-line no-inline-html -->
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[Website](https://rclone.org) |
# Rclone
Rclone *("rsync for cloud storage")* is a command-line program to sync files and
directories to and from different cloud storage providers.
Rclone *("rsync for cloud storage")* is a command-line program to sync files and directories to and from different cloud storage providers.
## Storage providers
- 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
- Box [:page_facing_up:](https://rclone.org/box/)
- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
- Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba)
- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
- FileLu [:page_facing_up:](https://rclone.org/filelu/)
- Files.com [:page_facing_up:](https://rclone.org/filescom/)
- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
- FTP [:page_facing_up:](https://rclone.org/ftp/)
- GoFile [:page_facing_up:](https://rclone.org/gofile/)
- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
- HTTP [:page_facing_up:](https://rclone.org/http/)
- Huawei Cloud Object Storage Service (OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
- ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
- Linkbox [:page_facing_up:](https://rclone.org/linkbox)
- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
- Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
- MEGA [:page_facing_up:](https://rclone.org/mega/)
- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
- Memory [:page_facing_up:](https://rclone.org/memory/)
- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
- Minio [:page_facing_up:](https://rclone.org/s3/#minio)
- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud)
- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/)
- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud)
- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
- pCloud [:page_facing_up:](https://rclone.org/pcloud/)
- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)
- PikPak [:page_facing_up:](https://rclone.org/pikpak/)
- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/)
- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
- put.io [:page_facing_up:](https://rclone.org/putio/)
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
- Seafile [:page_facing_up:](https://rclone.org/seafile/)
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
- Storj [:page_facing_up:](https://rclone.org/storj/)
- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/)
- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
- WebDAV [:page_facing_up:](https://rclone.org/webdav/)
- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/)
- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata)
- The local filesystem [:page_facing_up:](https://rclone.org/local/)
Please see [the full list of all storage providers and their features](https://rclone.org/overview/)
These backends adapt or modify other storage providers
- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
- Compress: compress files [:page_facing_up:](https://rclone.org/compress/)
- Crypt: encrypt files [:page_facing_up:](https://rclone.org/crypt/)
- Hasher: hash files [:page_facing_up:](https://rclone.org/hasher/)
- Union: join multiple remotes to work together [:page_facing_up:](https://rclone.org/union/)
## Features
- MD5/SHA-1 hashes checked at all times for file integrity
- Timestamps preserved on files
- Partial syncs supported on a whole file basis
- [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed
files
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory
identical
- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync
bidirectionally
- [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash
equality
- Can sync to and from network, e.g. two different cloud accounts
- Optional large file chunking ([Chunker](https://rclone.org/chunker/))
- Optional transparent compression ([Compress](https://rclone.org/compress/))
- Optional encryption ([Crypt](https://rclone.org/crypt/))
- Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
- Multi-threaded downloads to local disk
- Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files
over HTTP/WebDAV/FTP/SFTP/DLNA
## Installation & documentation
Please see the [rclone website](https://rclone.org/) for:
- [Installation](https://rclone.org/install/)
- [Documentation & configuration](https://rclone.org/docs/)
- [Changelog](https://rclone.org/changelog/)
- [FAQ](https://rclone.org/faq/)
- [Storage providers](https://rclone.org/overview/)
- [Forum](https://forum.rclone.org/)
- ...and more
## Downloads
- <https://rclone.org/downloads/>
## License
This is free software under the terms of the MIT license (check the
[COPYING file](/COPYING) included in this package).

This file describes how to make the various kinds of releases
## Extra required software for making a release
- [gh the github cli](https://github.com/cli/cli) for uploading packages
- pandoc for making the html and man pages
## Making a release
- git checkout master # see below for stable branch
- git pull # IMPORTANT
- git status - make sure everything is checked in
- Check GitHub actions build for master is Green
- make test # see integration test server or run locally
- make tag
- edit docs/content/changelog.md # make sure to remove duplicate logs from point
releases
- make tidy
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
- \# Wait for the GitHub builds to complete then...
- make fetch_binaries
- make tarball
- make vendorball
- make sign_upload
- make check_sign
- make upload
- make upload_website
- make upload_github
- make startdev # make startstable for stable branch
- \# announce with forum post, twitter post, patreon post
## Update dependencies
Early in the next release cycle update the dependencies.
- Review any pinned packages in go.mod and remove if possible
- `make updatedirect`
- `make GOTAGS=cmount`
- `make compiletest`
- Fix anything which doesn't compile at this point and commit changes here
- `git commit -a -v -m "build: update all dependencies"`
If the `make updatedirect` upgrades the version of go in the `go.mod`
```text
go 1.22.0
```
then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.
If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.
```console
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
```

If the `go mod tidy` fails use the output from it to remove the
package which can't be upgraded from `/tmp/potential-upgrades` when
done
```console
git co go.mod go.sum
```
Optionally upgrade the direct and indirect dependencies. This is very
likely to fail if the manual method was used above - in that case
ignore it as it is too time consuming to fix.
- `make update`
- `make GOTAGS=cmount`
- `make compiletest`
- roll back any updates which didn't compile
- `git commit -a -v --amend`
- **NB** watch out for this changing the default go version in `go.mod`
Note that `make update` updates all direct and indirect dependencies
and there can occasionally be forwards compatibility problems with
The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:
```console
go run github.com/icholy/gomajor@latest list -major
```
Expect API breakage when updating major versions.
At some point after the release run
```console
bin/tidy-beta v1.55
```
where the version number is that of a couple of releases ago, to remove old beta binaries.
If rclone needs a point release due to some horrendous bug:
Set vars
- BASE_TAG=v1.XX # e.g. v1.52
- NEW_TAG=${BASE_TAG}.Y # e.g. v1.52.1
- echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
- git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
- make startstable
Now
- git co ${BASE_TAG}-stable
- git cherry-pick any fixes
- make startstable
- Do the steps as above
- git co master
- `#` cherry pick the changes to the changelog - check the diff to make sure it
is correct
- git checkout ${BASE_TAG}-stable docs/content/changelog.md
- git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
- git push
## Sponsor logos
If updating the website note that the sponsor logos have been moved out of the
main repository.
You will need to checkout `/docs/static/img/logos` from <https://github.com/rclone/third-party-logos>
which is a private repo containing artwork from sponsors.
## Update the website between releases
Create an update website branch based off the last release
```console
git co -b update-website
```
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
```console
git reset --hard v1.64.0
```
Create the changes, check them in, test with `make serve` then
```console
make upload_test_website
```
Check out <https://test.rclone.org> and when happy
```console
make upload_website
```
Cherry pick any changes back to master and the stable branch if it is active.
To do a basic build of rclone's docker image to debug builds locally:
```console
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```
To test the multiplatform build
```console
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```
To make a full build then set the tags correctly and add `--push`
Note that you can't only build one architecture - you need to build them all.
```console
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
```

v1.72.0
v1.71.0

package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/archive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2"

//go:build !plan9
// Package archive implements a backend to access archive files in a remote
package archive
// FIXME factor common code between backends out - eg VFS initialization
// FIXME can we generalize the VFS handle caching and use it in zip backend
// Factor more stuff out if possible
// Odd stats which are probably coming from the VFS
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s
// FIXME this will perform poorly for unpacking as the VFS Reader is bad
// at multiple streams - need cache mode setting?
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
// Import all the required archivers here
_ "github.com/rclone/rclone/backend/archive/squashfs"
_ "github.com/rclone/rclone/backend/archive/zip"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "archive",
Description: "Read archives",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: `Remote to wrap to read archives from.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".
If this is left empty, then the archive backend will use the root as
the remote.
This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
Required: false,
}},
}
fs.Register(fsi)
}
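// Example usage (editorial illustration, not part of the original source):
//
//	rclone lsf :archive:remote:path/backup.zip
//
// is equivalent to configuring an archive remote with
// remote = "remote:path/backup.zip" and listing its root. A path inside
// the archive can be addressed by appending it after the archive file,
// e.g. :archive:remote:path/backup.zip/dir/file.txt.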
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}
// Fs represents an archive backend wrapping another remote
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
f fs.Fs // remote we are wrapping
wrapper fs.Fs // fs that wraps us
mu sync.Mutex // protects the below
archives map[string]*archive // the archives we have, by path
}
// A single open archive
type archive struct {
archiver archiver.Archiver // archiver responsible
remote string // path to the archive
prefix string // prefix to add on to listings
root string // root of the archive to remove from listings
mu sync.Mutex // protects the following variables
f fs.Fs // the archive Fs, may be nil
}
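// Worked example for the fields above (editorial note, not in the original
// source): when "dir/backup.zip" is discovered while listing the wrapped
// remote, the archive is tracked with prefix "dir/backup.zip" and root "",
// so its contents appear underneath that path. When the configured remote
// points directly inside an archive, prefix is "" and root is the path
// within the archive to strip from listings.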
// If remote is an archive then return it otherwise return nil
func findArchive(remote string) *archive {
// FIXME use something faster than linear search?
for _, archiver := range archiver.Archivers {
if strings.HasSuffix(remote, archiver.Extension) {
return &archive{
archiver: archiver,
remote: remote,
prefix: remote,
root: "",
}
}
}
return nil
}
// Find an archive buried in remote
func subArchive(remote string) *archive {
archive := findArchive(remote)
if archive != nil {
return archive
}
parent := path.Dir(remote)
if parent == "/" || parent == "." {
return nil
}
return subArchive(parent)
}
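// Illustration (editorial note): with the ".zip" archiver registered,
// findArchive("dir/backup.zip") matches because the remote itself ends in a
// known extension, while subArchive("dir/backup.zip/photos/a.jpg") walks up
// the parent directories until it reaches "dir/backup.zip" and returns that
// archive instead.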
// If remote is an archive then return it otherwise return nil
func (f *Fs) findArchive(remote string) (archive *archive) {
archive = findArchive(remote)
if archive != nil {
f.mu.Lock()
f.archives[remote] = archive
f.mu.Unlock()
}
return archive
}
// Instantiate archive if it hasn't been instantiated yet
//
// This is done lazily so that we can list a directory full of
// archives without opening them all.
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
a.mu.Lock()
defer a.mu.Unlock()
if a.f != nil {
return a.f, nil
}
newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
}
a.f = newFs
return a.f, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
remote := opt.Remote
origRoot := root
// If remote is empty, use the root instead
if remote == "" {
remote = root
root = ""
}
isDirectory := strings.HasSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
if remote == "" {
remote = "/"
}
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
}
_ = isDirectory
foundArchive := subArchive(remote)
if foundArchive != nil {
fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
// Archive path
foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
// Path to the archive
archiveRemote := remote[:len(foundArchive.remote)]
// Remote is archive leaf name
foundArchive.remote = path.Base(archiveRemote)
foundArchive.prefix = ""
// Point remote to archive file
remote = archiveRemote
}
// Make sure to remove trailing . referring to the current dir
if path.Base(root) == "." {
root = strings.TrimSuffix(root, ".")
}
remotePath := fspath.JoinRootPath(remote, root)
wrappedFs, err := cache.Get(ctx, remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
name: name,
//root: path.Join(remotePath, root),
root: origRoot,
opt: *opt,
f: wrappedFs,
archives: make(map[string]*archive),
}
cache.PinUntilFinalized(f.f, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if foundArchive != nil {
fs.Debugf(f, "Root is an archive")
if err != fs.ErrorIsFile {
return nil, fmt.Errorf("expecting to find a file at %q", remote)
}
return foundArchive.init(ctx, f.f)
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return f, err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("archive root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.f.Rmdir(ctx, dir)
}
// Hashes returns the hash types supported by the wrapped remote
func (f *Fs) Hashes() hash.Set {
return f.f.Hashes()
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.f.Mkdir(ctx, dir)
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.f.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, dir)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantCopy
// }
return do(ctx, src, remote)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantMove
// }
return do(ctx, src, remote)
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
do := f.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(ctx, srcFs.f, srcRemote, dstRemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
do := f.f.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
notifyFunc(path, entryType)
}
do(ctx, wrappedNotifyFunc, ch)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.f.Features().DirCacheFlush
if do != nil {
do()
}
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
var o fs.Object
var err error
if stream {
o, err = f.f.Features().PutStream(ctx, in, src, options...)
} else {
o, err = f.f.Put(ctx, in, src, options...)
}
if err != nil {
return nil, err
}
return o, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.f.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
// Find the Fs for the directory
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
f.mu.Lock()
defer f.mu.Unlock()
subFs = f.f
// FIXME should do this with a better datastructure like a prefix tree
// FIXME want to find the longest first otherwise nesting won't work
dirSlash := dir + "/"
for archiverRemote, archive := range f.archives {
subRemote := archiverRemote + "/"
if strings.HasPrefix(dirSlash, subRemote) {
subFs, err = archive.init(ctx, f.f)
if err != nil {
return nil, err
}
break
}
}
return subFs, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
entries, err = subFs.List(ctx, dir)
if err != nil {
return nil, err
}
for i, entry := range entries {
// Can only unarchive files
if o, ok := entry.(fs.Object); ok {
remote := o.Remote()
archive := f.findArchive(remote)
if archive != nil {
// Overwrite entry with directory
entries[i] = fs.NewDir(remote, o.ModTime(ctx))
}
}
}
return entries, nil
}
// NewObject creates a new remote archive file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
o, err := subFs.NewObject(ctx, remote)
if err != nil {
return nil, err
}
return o, nil
}
// Precision returns the precision of this Fs - archives are reported with one second precision
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
if do := f.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.f.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
return do(ctx, remote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.f.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
o, err := do(ctx, in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
do := f.f.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
return do(ctx, dirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.f.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
do := f.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, remote, size)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
do := f.f.Features().OpenChunkWriter
if do == nil {
return info, nil, fs.ErrorNotImplemented
}
return do(ctx, remote, src, options...)
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.f.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.f.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.OpenChunkWriter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
// FIXME _ fs.FullObject = (*Object)(nil)
)

//go:build !plan9
package archive
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// FIXME need to test Open with seek
// run - run a shell command
func run(t *testing.T, args ...string) {
cmd := exec.Command(args[0], args[1:]...)
fs.Debugf(nil, "run args = %v", args)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf(`
----------------------------
Failed to run %v: %v
Command output was:
%s
----------------------------
`, args, err, out)
}
}
// check the dst and src are identical
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
t.Run(name, func(t *testing.T) {
fs.Debugf(nil, "check %q vs %q", dstArchive, src)
Farchive, err := cache.Get(ctx, dstArchive)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
Fsrc, err := cache.Get(ctx, src)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
var matches bytes.Buffer
opt := operations.CheckOpt{
Fdst: Farchive,
Fsrc: Fsrc,
Match: &matches,
}
for _, action := range []string{"Check", "Download"} {
t.Run(action, func(t *testing.T) {
matches.Reset()
if action == "Download" {
assert.NoError(t, operations.CheckDownload(ctx, &opt))
} else {
assert.NoError(t, operations.Check(ctx, &opt))
}
if expectedCount > 0 {
assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
}
})
}
t.Run("NewObject", func(t *testing.T) {
// Check we can run NewObject on all files and read them
assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
if t.Failed() {
return
}
remote := srcObj.Remote()
archiveObj, err := Farchive.NewObject(ctx, remote)
require.NoError(t, err, remote)
assert.Equal(t, remote, archiveObj.Remote(), remote)
// Test that the contents are the same
archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
assert.Equal(t, srcBuf, archiveBuf)
if len(srcBuf) < 81 {
return
}
// Tests that Open works with SeekOption
assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")
// Tests that Open works with RangeOption
for _, test := range []struct {
ro fs.RangeOption
wantStart, wantEnd int
}{
{fs.RangeOption{Start: 5, End: 15}, 5, 16},
{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
} {
got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
foundAt := strings.Index(srcBuf, got)
help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
}
// Test that the modtimes are correct
fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())
// Test that the sizes are correct
assert.Equal(t, srcObj.Size(), archiveObj.Size())
// Test that Strings are OK
assert.Equal(t, srcObj.String(), archiveObj.String())
}))
})
// t.Logf("Fdst ------------- %v", Fdst)
// operations.List(ctx, Fdst, os.Stdout)
// t.Logf("Fsrc ------------- %v", Fsrc)
// operations.List(ctx, Fsrc, os.Stdout)
})
}
// test creating and reading back some archives
//
// Note that this uses rclone as an external binary and relies on the caller-supplied archiveFn to build the archive (e.g. with zip or mksquashfs).
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
ctx := context.Background()
checkFiles := 1000
// create random test input files
inputRoot := t.TempDir()
input := filepath.Join(inputRoot, archiveName)
require.NoError(t, os.Mkdir(input, 0777))
run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)
// Create the archive
output := t.TempDir()
zipFile := path.Join(output, archiveName)
archiveFn(t, zipFile, input)
// Check the archive itself
checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)
// Now check a subdirectory
fis, err := os.ReadDir(input)
require.NoError(t, err)
subDir := "NOT FOUND"
aFile := "NOT FOUND"
for _, fi := range fis {
if fi.IsDir() {
subDir = fi.Name()
} else {
aFile = fi.Name()
}
}
checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)
// Now check a single file
fiCtx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ "+aFile))
require.NoError(t, fi.AddRule("- *"))
checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)
// Now check the level above
checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}
// skipIfNoExe skips the test if the named executable is not installed
func skipIfNoExe(t *testing.T, exeName string) {
_, err := exec.LookPath(exeName)
if err != nil {
t.Skipf("%s executable not installed", exeName)
}
}
// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "zip")
skipIfNoExe(t, "rclone")
testArchive(t, "test.zip", func(t *testing.T, output, input string) {
oldcwd, err := os.Getwd()
require.NoError(t, err)
require.NoError(t, os.Chdir(input))
defer func() {
require.NoError(t, os.Chdir(oldcwd))
}()
run(t, "zip", "-9r", output, ".")
})
}
// Test creating and reading back some archives
//
// Note that this uses rclone and mksquashfs as external binaries.
func TestArchiveSquashfs(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "mksquashfs")
skipIfNoExe(t, "rclone")
testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
run(t, "mksquashfs", input, output)
})
}

View File

@@ -1,67 +0,0 @@
//go:build !plan9
// Test Archive filesystem interface
package archive_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
// In these tests we receive objects from the underlying remote which don't implement these methods
unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
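// Illustrative invocation (an assumption about typical usage, not part of
// this change): the suite can be pointed at a configured remote with
// something like
//
//	go test ./backend/archive -v -remote TestArchive:
//
// or -remote can be left unset to fall back to the TestLocal/TestMemory
// runs below.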
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := t.TempDir()
name := "TestArchiveLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := ":memory:"
name := "TestArchiveMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}

View File

@@ -1,7 +0,0 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// Package archive implements a backend to access archive files in a remote
package archive

View File

@@ -1,24 +0,0 @@
// Package archiver registers all the archivers
package archiver
import (
"context"
"github.com/rclone/rclone/fs"
)
// Archiver describes an archive package
type Archiver struct {
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
Extension string
}
// Archivers is a slice of all registered archivers
var Archivers []Archiver
// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
Archivers = append(Archivers, as...)
}
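// A hedged sketch (not part of this change) of how a format package would
// hook into this registry, mirroring the zip and squashfs packages later in
// this diff. The package name "tarball" and the ".tar" extension are
// illustrative assumptions only.
//
//	package tarball
//
//	import (
//		"context"
//
//		"github.com/rclone/rclone/backend/archive/archiver"
//		"github.com/rclone/rclone/fs"
//	)
//
//	func init() {
//		archiver.Register(archiver.Archiver{
//			// New would build the read-only Fs for a ".tar" archive.
//			New: func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error) {
//				return nil, fs.ErrorNotImplemented // placeholder body
//			},
//			Extension: ".tar",
//		})
//	}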

View File

@@ -1,233 +0,0 @@
// Package base is a base archive Fs
package base
import (
"context"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/vfs"
)
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // archive object
remote string // remote of the archive object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
VFS := vfs.New(wrappedFs, nil)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return f.name
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return nil, errNotImplemented
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
return nil, errNotImplemented
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw archive file
type Object struct {
f *Fs
remote string
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return -1
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return time.Now()
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
return nil, errNotImplemented
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

View File

@@ -1,165 +0,0 @@
package squashfs
// Could just be using bare object Open with RangeRequest which
// would transfer the minimum amount of data but may be slower.
import (
"errors"
"fmt"
"io/fs"
"os"
"sync"
"github.com/diskfs/go-diskfs/backend"
"github.com/rclone/rclone/vfs"
)
// Cache file handles for accessing the file
type cache struct {
node vfs.Node
fhsMu sync.Mutex
fhs []cacheHandle
}
// A cached file handle
type cacheHandle struct {
offset int64
fh vfs.Handle
}
// Make a new cache
func newCache(node vfs.Node) *cache {
return &cache{
node: node,
}
}
// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
if len(c.fhs) > 0 {
// Look for exact match first
for i, cfh := range c.fhs {
if cfh.offset == off {
// fs.Debugf(nil, "CACHE MATCH")
c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
return cfh.fh, nil
}
}
// fs.Debugf(nil, "CACHE MISS")
// Just take the first one if not found
cfh := c.fhs[0]
c.fhs = c.fhs[1:]
return cfh.fh, nil
}
fh, err = c.node.Open(os.O_RDONLY)
if err != nil {
return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
}
return fh, nil
}
// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
func (c *cache) close(fh vfs.Handle, off int64) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
c.fhs = append(c.fhs, cacheHandle{
offset: off,
fh: fh,
})
}
// ReadAt reads len(p) bytes into p starting at offset off in the underlying
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
// error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input
// source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset, ReadAt should
// not affect nor be affected by the underlying seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the same input
// source.
//
// Implementations must not retain p.
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
fh, err := c.open(off)
if err != nil {
return n, err
}
defer func() {
c.close(fh, off+int64(len(p)))
}()
// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
return fh.ReadAt(p, off)
}
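// Hedged illustration (not in the original file): two sequential reads reuse
// the same underlying vfs.Handle, because close() records the offset the
// handle will read from next and open() looks for an exact match first.
//
//	c := newCache(node) // node is a vfs.Node for the squashfs file
//	buf := make([]byte, 64)
//	_, _ = c.ReadAt(buf, 0)  // opens a new handle, parks it at offset 64
//	_, _ = c.ReadAt(buf, 64) // exact offset match - handle reused, no seek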
var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")
// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
return 0, errCacheNotImplemented
}
// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
return 0, errCacheNotImplemented
}
// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
return 0, errCacheNotImplemented
}
func (c *cache) Stat() (fs.FileInfo, error) {
return nil, errCacheNotImplemented
}
// Close the file
func (c *cache) Close() (err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
// Close any open file handles
for i := range c.fhs {
fh := &c.fhs[i]
newErr := fh.fh.Close()
if err == nil {
err = newErr
}
}
c.fhs = nil
return err
}
// Sys returns OS-specific file for ioctl calls via fd
func (c *cache) Sys() (*os.File, error) {
return nil, errCacheNotImplemented
}
// Writable returns file for read-write operations
func (c *cache) Writable() (backend.WritableFile, error) {
return nil, errCacheNotImplemented
}
// check interfaces
var _ backend.Storage = (*cache)(nil)

View File

@@ -1,446 +0,0 @@
// Package squashfs implements a squashfs archiver for the archive backend
package squashfs
import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".sqfs",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
sqfs *squashfs.FileSystem // interface to the squashfs
c *cache
node vfs.Node // squashfs file object - set if reading
remote string // remote of the squashfs file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
c := newCache(node)
// FIXME blocksize
sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
if err != nil {
return nil, fmt.Errorf("failed to read squashfs: %w", err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
sqfs: sqfs,
c: c,
remote: remote,
root: strings.Trim(root, "/"),
prefix: prefix,
prefixSlash: prefix + "/",
}
if prefix == "" {
f.prefixSlash = ""
}
singleObject := false
// Find the directory the root points to
if f.root != "" && !strings.HasSuffix(root, "/") {
native, err := f.toNative("")
if err == nil {
native = strings.TrimRight(native, "/")
_, err := f.newObjectNative(native)
if err == nil {
// If it pointed to a file, record that so fs.ErrorIsFile is
// returned below, and point the root at the directory above
singleObject = true
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
}
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with squashfs
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Squashfs %q", f.name)
}
// This turns a remote into a native path in the squashfs starting with a /
func (f *Fs) toNative(remote string) (string, error) {
native := strings.Trim(remote, "/")
if f.prefix == "" {
native = "/" + native
} else if native == f.prefix {
native = "/"
} else if !strings.HasPrefix(native, f.prefixSlash) {
return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
} else {
native = native[len(f.prefix):]
}
if f.root != "" {
native = "/" + f.root + native
}
return native, nil
}
// Turn a (nativeDir, leaf) into a remote
func (f *Fs) fromNative(nativeDir string, leaf string) string {
// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
dir := nativeDir
if f.root != "" {
dir = strings.TrimPrefix(dir, "/"+f.root)
}
remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
return remote
}
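// Worked example (illustrative, not part of the original file), assuming an
// archive exposed with prefix "arch.sqfs" and root "docs":
//
//	toNative("arch.sqfs/readme.txt") -> "/docs/readme.txt"
//	fromNative("/docs", "readme.txt") -> "arch.sqfs/readme.txt"
//
// toNative strips the prefix and prepends "/"+root; fromNative reverses the
// mapping when building remotes from ReadDir results.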
// Convert a FileInfo into an Object from native dir
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
return &Object{
fs: f,
remote: f.fromNative(nativeDir, item.Name()),
size: item.Size(),
modTime: item.ModTime(),
item: item,
}
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
nativeDir, err := f.toNative(dir)
if err != nil {
return nil, err
}
items, err := f.sqfs.ReadDir(nativeDir)
if err != nil {
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
}
entries = make(fs.DirEntries, 0, len(items))
for _, fi := range items {
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
// fs.Debugf(item.Name(), "entry = %#v", item)
var entry fs.DirEntry
if err != nil {
return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
}
if item.IsDir() {
var remote = f.fromNative(nativeDir, item.Name())
entry = fs.NewDir(remote, item.ModTime())
} else {
if item.Mode().IsRegular() {
entry = f.objectFromFileInfo(nativeDir, item)
} else {
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
continue
}
}
entries = append(entries, entry)
}
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// newObjectNative finds the object at the native path passed in
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
// get the path and filename
dir, leaf := path.Split(nativePath)
dir = strings.TrimRight(dir, "/")
leaf = strings.Trim(leaf, "/")
// FIXME need to detect directory not found
fis, err := f.sqfs.ReadDir(dir)
if err != nil {
return nil, fs.ErrorObjectNotFound
}
for _, fi := range fis {
if fi.Name() == leaf {
if fi.IsDir() {
return nil, fs.ErrorNotAFile
}
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
o = f.objectFromFileInfo(dir, item)
break
}
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return o, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
nativePath, err := f.toNative(remote)
if err != nil {
return nil, err
}
return f.newObjectNative(nativePath)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw squashfs file
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
item squashfs.FileStat
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Turn a squashfs path into a full path for the parent Fs
// func (o *Object) path(remote string) string {
// return path.Join(o.fs.prefix, remote)
// }
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
remote, err := o.fs.toNative(o.remote)
if err != nil {
return nil, err
}
fs.Debugf(o, "Opening %q", remote)
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
fh, err := o.item.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = fh.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
return readers.NewLimitedReadCloser(fh, limit), nil
}
return fh, nil
}
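// Hedged note (not part of the original file): the handle returned by
// item.Open() is seekable, so a SeekOption maps onto a real Seek rather than
// discarding bytes as the zip archiver has to, for example:
//
//	rc, err := obj.Open(ctx, &fs.SeekOption{Offset: 50}) // obj is illustrative
//	// rc now reads from byte 50 of the file inside the squashfs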
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

View File

@@ -1,385 +0,0 @@
// Package zip implements a zip archiver for the archive backend
package zip
import (
"archive/zip"
"context"
"errors"
"fmt"
"io"
"os"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".zip",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // zip file object - set if reading
remote string // remote of the zip file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
dt dirtree.DirTree // read from zipfile
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// Read the contents of the zip file
singleObject, err := f.readZip()
if err != nil {
return nil, fmt.Errorf("failed to open zip file: %w", err)
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with zip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Zip %q", f.name)
}
// readZip reads the zip file into f
//
// Returns singleObject=true if f.root points to a file
func (f *Fs) readZip() (singleObject bool, err error) {
if f.node == nil {
return singleObject, fs.ErrorDirNotFound
}
size := f.node.Size()
if size < 0 {
return singleObject, errors.New("can't read from zip file with unknown size")
}
r, err := f.node.Open(os.O_RDONLY)
if err != nil {
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
}
zr, err := zip.NewReader(r, size)
if err != nil {
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
}
dt := dirtree.New()
for _, file := range zr.File {
remote := strings.Trim(path.Clean(file.Name), "/")
if remote == "." {
remote = ""
}
remote = path.Join(f.prefix, remote)
if f.root != "" {
// Ignore all files outside the root
if !strings.HasPrefix(remote, f.root) {
continue
}
if remote == f.root {
remote = ""
} else {
remote = strings.TrimPrefix(remote, f.root+"/")
}
}
if strings.HasSuffix(file.Name, "/") {
dir := fs.NewDir(remote, file.Modified)
dt.AddDir(dir)
} else {
if remote == "" {
remote = path.Base(f.root)
singleObject = true
dt = dirtree.New()
}
o := &Object{
f: f,
remote: remote,
fh: &file.FileHeader,
file: file,
}
dt.Add(o)
if singleObject {
break
}
}
}
dt.CheckParents("")
dt.Sort()
f.dt = dt
//fs.Debugf(nil, "dt = %v", dt)
return singleObject, nil
}
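// Worked example (illustrative, not part of the original file): with prefix
// "" and root "dir/file.txt", the zip entry "dir/file.txt" maps to remote "",
// which is then renamed to path.Base(root) = "file.txt" and singleObject is
// set, so New returns the Fs together with fs.ErrorIsFile as rclone expects
// when the path points at a single file.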
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
// _, err = f.strip(dir)
// if err != nil {
// return nil, err
// }
entries, ok := f.dt[dir]
if !ok {
return nil, fs.ErrorDirNotFound
}
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
if f.dt == nil {
return nil, fs.ErrorObjectNotFound
}
_, entry := f.dt.Find(remote)
if entry == nil {
return nil, fs.ErrorObjectNotFound
}
o, ok := entry.(*Object)
if !ok {
return nil, fs.ErrorNotAFile
}
return o, nil
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.CRC32)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
fh *zip.FileHeader
file *zip.File
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.fh.UncompressedSize64)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.fh.Modified
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if ht == hash.CRC32 {
// FIXME return empty CRC if writing
if o.f.dt == nil {
return "", nil
}
return fmt.Sprintf("%08x", o.fh.CRC32), nil
}
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
rc, err = o.file.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = io.CopyN(io.Discard, rc, offset)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
return readers.NewLimitedReadCloser(rc, limit), nil
}
return rc, nil
}
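// Hedged usage sketch (not part of this change): a caller asking for bytes
// 100-199 of an archived file passes a RangeOption; Open satisfies it by
// discarding the first 100 decompressed bytes with io.CopyN and wrapping the
// remainder in a limited reader (obj below is illustrative).
//
//	rc, err := obj.Open(ctx, &fs.RangeOption{Start: 100, End: 199})
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	data, err := io.ReadAll(rc) // at most 100 bytes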
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

View File

@@ -51,7 +51,6 @@ import (
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/sync/errgroup"
)
@@ -1338,9 +1337,9 @@ func (f *Fs) containerOK(container string) bool {
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if !f.containerOK(containerName) {
return fs.ErrorDirNotFound
return nil, fs.ErrorDirNotFound
}
err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1348,16 +1347,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(containerName)
return nil
return entries, nil
}
// listContainers returns all the containers to out
@@ -1393,47 +1392,14 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return fs.ErrorListBucketRequired
return nil, fs.ErrorListBucketRequired
}
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
if err != nil {
return err
}
return f.listContainers(ctx)
}
return list.Flush()
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -2152,6 +2118,7 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
}
metadata = make(map[string]*string, len(o.meta))
for k, v := range o.meta {
v := v
metadata[k] = &v
}
return metadata
@@ -2703,13 +2670,6 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
// Upload the block, with MD5 for check
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
@@ -3186,7 +3146,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}

View File

@@ -56,7 +56,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/readers"
@@ -454,7 +453,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
options := azidentity.AzureCLICredentialOptions{}
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
@@ -551,7 +550,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
case opt.UseMSI:
// Specifying a user-assigned identity. Exactly one of the above IDs must be specified.
// Validate and ensure exactly one is set. (To do: better validation.)
b2i := map[bool]int{false: 0, true: 1}
var b2i = map[bool]int{false: 0, true: 1}
set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
if set > 1 {
return nil, errors.New("more than one user-assigned identity ID is set")
@@ -584,6 +583,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
token, err := msiCred.GetToken(context.Background(), policy.TokenRequestOptions{
Scopes: []string{"api://AzureADTokenExchange"},
})
if err != nil {
return "", fmt.Errorf("failed to acquire MSI token: %w", err)
}
@@ -844,35 +844,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
//
// This should return ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
var entries fs.DirEntries
subDirClient := f.dirClient(dir)
// Checking whether directory exists
_, err := subDirClient.GetProperties(ctx, nil)
if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
return fs.ErrorDirNotFound
return entries, fs.ErrorDirNotFound
} else if err != nil {
return err
return entries, err
}
opt := &directory.ListFilesAndDirectoriesOptions{
var opt = &directory.ListFilesAndDirectoriesOptions{
Include: directory.ListFilesInclude{
Timestamps: true,
},
@@ -881,7 +864,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
for pager.More() {
resp, err := pager.NextPage(ctx)
if err != nil {
return err
return entries, err
}
for _, directory := range resp.Segment.Directories {
// Name *string `xml:"Name"`
@@ -907,10 +890,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
if directory.Properties.ContentLength != nil {
entry.SetSize(*directory.Properties.ContentLength)
}
err = list.Add(entry)
if err != nil {
return err
}
entries = append(entries, entry)
}
for _, file := range resp.Segment.Files {
leaf := f.opt.Enc.ToStandardPath(*file.Name)
@@ -924,13 +904,10 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
if file.Properties.LastWriteTime != nil {
entry.modTime = *file.Properties.LastWriteTime
}
err = list.Add(entry)
if err != nil {
return err
}
entries = append(entries, entry)
}
}
return list.Flush()
return entries, nil
}
// ------------------------------------------------------------
@@ -1037,10 +1014,6 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
SMBProperties: &file.SMBProperties{
LastWriteTime: &t,
},
HTTPHeaders: &file.HTTPHeaders{
ContentMD5: o.md5,
ContentType: &o.contentType,
},
}
_, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
if err != nil {
@@ -1337,29 +1310,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
srcURL := srcObj.fileClient().URL()
fc := f.fileClient(remote)
startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
if err != nil {
return nil, fmt.Errorf("Copy failed: %w", err)
}
// Poll for completion if necessary
//
// The for loop is never executed for same storage account copies.
copyStatus := startCopy.CopyStatus
var properties file.GetPropertiesResponse
pollTime := 100 * time.Millisecond
for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
time.Sleep(pollTime)
properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
if err != nil {
return nil, err
}
copyStatus = properties.CopyStatus
pollTime = min(2*pollTime, time.Second)
}
dstObj, err := f.NewObject(ctx, remote)
if err != nil {
return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
@@ -1474,7 +1428,6 @@ var (
_ fs.DirMover = &Fs{}
_ fs.Copier = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -48,14 +48,6 @@ type LifecycleRule struct {
FileNamePrefix string `json:"fileNamePrefix"`
}
// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
type ServerSideEncryption struct {
Mode string `json:"mode"`
Algorithm string `json:"algorithm"` // Encryption algorithm to use
CustomerKey string `json:"customerKey"` // User provided Base64 encoded key that is used by the server to encrypt files
CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
}
// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
@@ -269,22 +261,21 @@ type GetFileInfoRequest struct {
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
}
// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
ContentType string `json:"contentType"` // The MIME type of the file.
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
@@ -334,25 +325,21 @@ type CancelLargeFileResponse struct {
// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
// CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
type CopyPartRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
SourceServerSideEncryption *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the source file
DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
LargeFileID string `json:"largeFileId"` // The ID of the large file the part will belong to, as returned by b2_start_large_file.
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}
// UpdateBucketRequest describes a request to modify a B2 bucket

View File

@@ -8,9 +8,7 @@ import (
"bufio"
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
@@ -55,9 +53,6 @@ const (
nameHeader = "X-Bz-File-Name"
timestampHeader = "X-Bz-Upload-Timestamp"
retryAfterHeader = "Retry-After"
sseAlgorithmHeader = "X-Bz-Server-Side-Encryption-Customer-Algorithm"
sseKeyHeader = "X-Bz-Server-Side-Encryption-Customer-Key"
sseMd5Header = "X-Bz-Server-Side-Encryption-Customer-Key-Md5"
minSleep = 10 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
@@ -72,7 +67,7 @@ const (
// Globals
var (
errNotWithVersions = errors.New("can't modify files in --b2-versions mode")
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
)
@@ -257,51 +252,6 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
Default: (encoder.Display |
encoder.EncodeBackSlash |
encoder.EncodeInvalidUtf8),
}, {
Name: "sse_customer_algorithm",
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "AES256",
Help: "Advanced Encryption Standard (256 bits key length)",
}},
}, {
Name: "sse_customer_key",
Help: `To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
Alternatively you can provide --sse-customer-key-base64.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
Sensitive: true,
}, {
Name: "sse_customer_key_base64",
Help: `To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
Alternatively you can provide --sse-customer-key.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
Sensitive: true,
}, {
Name: "sse_customer_key_md5",
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
If you leave it blank, this is calculated automatically from the sse_customer_key provided.
`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
Sensitive: true,
}},
})
}
@@ -324,10 +274,6 @@ type Options struct {
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Lifecycle int `config:"lifecycle"`
Enc encoder.MultiEncoder `config:"encoding"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyBase64 string `config:"sse_customer_key_base64"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
}
// Fs represents a remote b2 server
@@ -558,24 +504,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyBase64 != "" {
return nil, errors.New("b2: can't use both sse_customer_key and sse_customer_key_base64 at the same time")
} else if opt.SSECustomerKeyBase64 != "" {
// Decode the Base64-encoded key and store it in the SSECustomerKey field
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
if err != nil {
return nil, fmt.Errorf("b2: Could not decode sse_customer_key_base64: %w", err)
}
opt.SSECustomerKey = string(decoded)
} else {
// Encode the raw key as Base64
opt.SSECustomerKeyBase64 = base64.StdEncoding.EncodeToString([]byte(opt.SSECustomerKey))
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
// Calculate CustomerKeyMd5 if not supplied
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
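For reference, a minimal sketch (not part of the diff) of how the removed SSE-C handling above derives the request values: the raw key is Base64 encoded, and its MD5 checksum is Base64 encoded as well, matching the NewFs logic and the X-Bz-Server-Side-Encryption-Customer-* header constants defined earlier in the file.

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// sseHeaders mirrors the removed NewFs logic: encode the raw key as Base64
// and derive the Base64-encoded MD5 checksum when none was supplied.
func sseHeaders(rawKey string) (keyB64, keyMD5 string) {
	keyB64 = base64.StdEncoding.EncodeToString([]byte(rawKey))
	sum := md5.Sum([]byte(rawKey))
	keyMD5 = base64.StdEncoding.EncodeToString(sum[:])
	return keyB64, keyMD5
}

func main() {
	k, m := sseHeaders("my-32-byte-secret-key-goes-here!")
	fmt.Println("X-Bz-Server-Side-Encryption-Customer-Key:", k)
	fmt.Println("X-Bz-Server-Side-Encryption-Customer-Key-Md5:", m)
}
```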
@@ -919,7 +847,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
last := ""
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
@@ -927,16 +855,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return nil
return entries, nil
}
// listBuckets returns all the buckets to out
@@ -962,46 +890,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
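A rough sketch of the ListP shape being removed above, assuming rclone's fs/list helper API exactly as used in the hunk; the lister argument is a hypothetical stand-in for the backend's own pager.

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/list"
)

// listPSketch shows the pattern: a helper wraps the callback, entries are
// pushed through Add as they are discovered, and Flush delivers whatever is
// still buffered when listing finishes.
func listPSketch(ctx context.Context, callback fs.ListRCallback, lister func(func(fs.DirEntry) error) error) error {
	helper := list.NewHelper(callback)
	if err := lister(helper.Add); err != nil {
		return err
	}
	return helper.Flush()
}
```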
// ListR lists the objects and directories of the Fs starting
@@ -1507,16 +1403,6 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
Name: f.opt.Enc.FromStandardPath(dstPath),
DestBucketID: destBucketID,
}
if f.opt.SSECustomerKey != "" && f.opt.SSECustomerKeyMD5 != "" {
serverSideEncryptionConfig := api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: f.opt.SSECustomerAlgorithm,
CustomerKey: f.opt.SSECustomerKeyBase64,
CustomerKeyMd5: f.opt.SSECustomerKeyMD5,
}
request.SourceServerSideEncryption = &serverSideEncryptionConfig
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
}
if newInfo == nil {
request.MetadataDirective = "COPY"
} else {
@@ -1948,10 +1834,9 @@ var _ io.ReadCloser = &openFile{}
func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
opts := rest.Opts{
Method: method,
Options: options,
NoResponse: method == "HEAD",
ExtraHeaders: map[string]string{},
Method: method,
Options: options,
NoResponse: method == "HEAD",
}
// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1969,11 +1854,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
}
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
@@ -2238,11 +2118,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
},
ContentLength: &size,
}
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
}
var response api.FileInfo
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -2317,27 +2192,20 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
return info, nil, err
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: up.chunkSize,
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
return info, up, nil
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
return info, up, err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
if o.fs.opt.Versions {
t, path := api.RemoveVersion(bucketPath)
if !t.IsZero() {
return o.fs.deleteByID(ctx, o.id, path)
}
return errNotWithVersions
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
@@ -2360,36 +2228,32 @@ func (o *Object) ID() string {
var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket.",
Short: "Read or set the lifecycle for a bucket",
Long: `This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules:
` + "```console" + `
rclone backend lifecycle b2:bucket
` + "```" + `
rclone backend lifecycle b2:bucket
This will dump something like this showing the lifecycle rules.
` + "```json" + `
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
` + "```" + `
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
If there are no lifecycle rules (the default) then it will just return ` + "`[]`" + `.
If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules:
` + "```console" + `
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
` + "```" + `
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
This will run and then print the new lifecycle rules as above.
@@ -2401,17 +2265,14 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config also which will mean deletions won't cause versions but
overwrites will still cause versions to be made.
` + "```console" + `
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
` + "```" + `
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
See: <https://www.backblaze.com/docs/cloud-storage-lifecycle-rules>`,
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": `After a file has been hidden for this many days
it is deleted. 0 is off.`,
"daysFromUploadingToHiding": `This many days after uploading a file is hidden.`,
"daysFromStartingToCancelingUnfinishedLargeFiles": `Cancels any unfinished
large file versions after this many days.`,
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
},
}
@@ -2494,14 +2355,13 @@ max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
` + "```console" + `
rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
` + "```" + `
rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete.",
"max-age": "Max age of upload to delete",
},
}
@@ -2524,9 +2384,8 @@ var cleanupHiddenHelp = fs.CommandHelp{
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
` + "```console" + `
rclone backend cleanup-hidden b2:bucket/path/to/dir
` + "```",
rclone backend cleanup-hidden b2:bucket/path/to/dir
`,
}
func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
@@ -2569,7 +2428,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}

View File

@@ -144,14 +144,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
request.ServerSideEncryption = &api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: o.fs.opt.SSECustomerAlgorithm,
CustomerKey: o.fs.opt.SSECustomerKeyBase64,
CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
}
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
@@ -303,12 +295,6 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
ContentLength: &sizeWithHash,
}
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
@@ -348,17 +334,6 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
PartNumber: int64(part + 1),
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
serverSideEncryptionConfig := api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: up.o.fs.opt.SSECustomerAlgorithm,
CustomerKey: up.o.fs.opt.SSECustomerKeyBase64,
CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
}
request.SourceServerSideEncryption = &serverSideEncryptionConfig
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)

View File

@@ -125,21 +125,10 @@ type FolderItems struct {
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
// There is some confusion about how this is actually
// returned. The []struct has worked for many years, but in
// https://github.com/rclone/rclone/issues/8776 box was
// returning it not as a list. We don't actually use
// this so comment it out.
//
// Order struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
//
// Order []struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
}
// Parent defined the ID of the parent directory
@@ -282,9 +271,9 @@ type User struct {
ModifiedAt time.Time `json:"modified_at"`
Language string `json:"language"`
Timezone string `json:"timezone"`
SpaceAmount float64 `json:"space_amount"`
SpaceUsed float64 `json:"space_used"`
MaxUploadSize float64 `json:"max_upload_size"`
SpaceAmount int64 `json:"space_amount"`
SpaceUsed int64 `json:"space_used"`
MaxUploadSize int64 `json:"max_upload_size"`
Status string `json:"status"`
JobTitle string `json:"job_title"`
Phone string `json:"phone"`

View File

@@ -37,7 +37,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -87,11 +86,13 @@ func init() {
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if usesJWTAuth(m) {
err = refreshJWTToken(ctx, name, m)
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
}
@@ -112,11 +113,6 @@ func init() {
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "config_credentials",
Help: "Box App config.json contents.\n\nLeave blank normally.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
@@ -187,17 +183,9 @@ See: https://developer.box.com/guides/authentication/jwt/as-user/
})
}
func usesJWTAuth(m configmap.Mapper) bool {
jsonFile, okFile := m.Get("box_config_file")
jsonFileCredentials, okCredentials := m.Get("config_credentials")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != ""
}
func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
boxSubType, _ := m.Get("box_sub_type")
boxConfig, err := getBoxConfig(m)
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
return fmt.Errorf("get box config: %w", err)
}
@@ -216,19 +204,12 @@ func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error
return err
}
func getBoxConfig(m configmap.Mapper) (boxConfig *api.ConfigJSON, err error) {
configFileCredentials, _ := m.Get("config_credentials")
configFileBytes := []byte(configFileCredentials)
if configFileCredentials == "" {
configFile, _ := m.Get("box_config_file")
configFileBytes, err = os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
err = json.Unmarshal(configFileBytes, &boxConfig)
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
}
@@ -503,12 +484,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
if ts != nil {
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if usesJWTAuth(m) {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(ctx, name, m)
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
return err
})
f.tokenRenewer.Start()
@@ -721,27 +705,9 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
@@ -751,22 +717,14 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
err = list.Add(d)
if err != nil {
iErr = err
return true
}
entries = append(entries, d)
} else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
err = list.Add(o)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
// Cache some metadata for this Item to help us process events later
@@ -782,12 +740,12 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
return false
})
if err != nil {
return err
return nil, err
}
if iErr != nil {
return iErr
return nil, iErr
}
return list.Flush()
return entries, nil
}
// Creates from the parameters passed in a half finished Object which
@@ -1783,7 +1741,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)

View File

@@ -684,7 +684,7 @@ func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
start, end int64
}
parseChunks := func(ranges string) (crs []chunkRange, err error) {
for part := range strings.SplitSeq(ranges, ",") {
for _, part := range strings.Split(ranges, ",") {
var start, end int64 = 0, math.MaxInt64
switch ints := strings.Split(part, ":"); len(ints) {
case 1:
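The one-line change above swaps strings.SplitSeq for strings.Split. A small illustration of the difference, assuming a Go 1.24 toolchain for the SplitSeq form: SplitSeq yields the substrings lazily as an iterator, so no intermediate slice is allocated, while Split builds the whole slice up front.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	ranges := "0:100,200:300"

	// Works on any Go version: allocates a []string.
	for _, part := range strings.Split(ranges, ",") {
		fmt.Println("split:", part)
	}

	// Go 1.24+: range over an iterator, no slice allocation.
	for part := range strings.SplitSeq(ranges, ",") {
		fmt.Println("splitseq:", part)
	}
}
```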

View File

@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache

View File

@@ -187,6 +187,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
g, gCtx := errgroup.WithContext(ctx)
var mu sync.Mutex
for _, upstream := range opt.Upstreams {
upstream := upstream
g.Go(func() (err error) {
equal := strings.IndexRune(upstream, '=')
if equal < 0 {
@@ -240,22 +241,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove, slowHash := true, false
canMove := true
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
slowHash = slowHash || u.f.Features().SlowHash
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// If any of upstreams are SlowHash, propagate it
features.SlowHash = slowHash
// Enable ListR when upstreams either support ListR or is local
// But not when all upstreams are local
if features.ListR == nil {
@@ -369,6 +366,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream) error) error {
g, gCtx := errgroup.WithContext(ctx)
for _, u := range f.upstreams {
u := u
g.Go(func() (err error) {
return fn(gCtx, u)
})
@@ -635,6 +633,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var uChans []chan time.Duration
for _, u := range f.upstreams {
u := u
if do := u.f.Features().ChangeNotify; do != nil {
ch := make(chan time.Duration)
uChans = append(uChans, ch)
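The `upstream := upstream` and `u := u` copies that appear in these hunks are the classic pre-Go 1.22 loop-variable capture idiom. A minimal sketch of why they matter when goroutines are launched from a range loop:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	upstreams := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, u := range upstreams {
		u := u // redundant on Go 1.22+, required before that so each goroutine sees its own value
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(u)
		}()
	}
	wg.Wait()
}
```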

View File

@@ -2,8 +2,10 @@
package compress
import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/binary"
"encoding/hex"
@@ -44,7 +46,6 @@ const (
minCompressionRatio = 1.1
gzFileExt = ".gz"
zstdFileExt = ".zst"
metaFileExt = ".json"
uncompressedFileExt = ".bin"
)
@@ -53,7 +54,6 @@ const (
const (
Uncompressed = 0
Gzip = 2
Zstd = 4
)
var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
@@ -66,10 +66,6 @@ func init() {
Value: "gzip",
Help: "Standard gzip compression with fastest parameters.",
},
{
Value: "zstd",
Help: "Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.",
},
}
// Register our remote
@@ -91,23 +87,17 @@ func init() {
Examples: compressionModeOptions,
}, {
Name: "level",
Help: `GZIP (levels -2 to 9):
- -2 — Huffman encoding only. Only use if you know what you're doing.
- -1 (default) — recommended; equivalent to level 5.
- 0 — turns off compression.
- 1 to 9 — increase compression at the cost of speed. Going past 6 generally offers very little return.
ZSTD (levels 0 to 4):
- 0 — turns off compression entirely.
- 1 — fastest compression with the lowest ratio.
- 2 (default) — good balance of speed and compression.
- 3 — better compression, but uses about 2-3x more CPU than the default.
- 4 — best possible compression ratio (highest CPU cost).
Notes:
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).`,
Required: true,
Help: `GZIP compression level (-2 to 9).
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
Advanced: true,
}, {
Name: "ram_cache_limit",
Help: `Some remotes don't allow the upload of files with unknown size.
@@ -122,47 +112,6 @@ this limit will be cached on disk.`,
})
}
// compressionModeHandler defines the interface for handling different compression modes
type compressionModeHandler interface {
// processFileNameGetFileExtension returns the file extension for the given compression mode
processFileNameGetFileExtension(compressionMode int) string
// newObjectGetOriginalSize returns the original file size from the metadata
newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
isCompressible(r io.Reader, compressionMode int) (bool, error)
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error)
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error)
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
@@ -176,13 +125,12 @@ type Options struct {
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
wrapper fs.Fs
name string
root string
opt Options
mode int // compression mode id
features *fs.Features // optional features
modeHandler compressionModeHandler // compression mode handler
wrapper fs.Fs
name string
root string
opt Options
mode int // compression mode id
features *fs.Features // optional features
}
// NewFs constructs an Fs from the path, container:path
@@ -219,28 +167,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
}
compressionMode := compressionModeFromName(opt.CompressionMode)
var modeHandler compressionModeHandler
switch compressionMode {
case Gzip:
modeHandler = &gzipModeHandler{}
case Zstd:
modeHandler = &zstdModeHandler{}
case Uncompressed:
modeHandler = &uncompressedModeHandler{}
default:
modeHandler = &unknownModeHandler{}
}
// Create the wrapping fs
f := &Fs{
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
mode: compressionMode,
modeHandler: modeHandler,
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
mode: compressionModeFromName(opt.CompressionMode),
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
@@ -282,13 +215,10 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
return f, err
}
// compressionModeFromName converts a compression mode name to its int representation.
func compressionModeFromName(name string) int {
switch name {
case "gzip":
return Gzip
case "zstd":
return Zstd
default:
return Uncompressed
}
@@ -312,7 +242,7 @@ func base64ToInt64(str string) (int64, error) {
// Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
// Returns -2 for the original size if the file is uncompressed.
func processFileName(compressedFileName string, modeHandler compressionModeHandler) (origFileName string, extension string, origSize int64, err error) {
func processFileName(compressedFileName string) (origFileName string, extension string, origSize int64, err error) {
// Separate the filename and size from the extension
extensionPos := strings.LastIndex(compressedFileName, ".")
if extensionPos == -1 {
@@ -331,8 +261,7 @@ func processFileName(compressedFileName string, modeHandler compressionModeHandl
if err != nil {
return "", "", 0, errors.New("could not decode size")
}
ext := modeHandler.processFileNameGetFileExtension(compressionModeFromName(compressedFileName[extensionPos+1:]))
return match[1], ext, size, nil
return match[1], gzFileExt, size, nil
}
// Generates the file name for a metadata file
@@ -357,15 +286,11 @@ func unwrapMetadataFile(filename string) (string, bool) {
// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
switch mode {
case Gzip:
if mode != Uncompressed {
newRemote = remote + "." + int64ToBase64(size) + gzFileExt
case Zstd:
newRemote = remote + "." + int64ToBase64(size) + zstdFileExt
default:
} else {
newRemote = remote + uncompressedFileExt
}
return newRemote
}
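A hedged sketch of the naming scheme implied by makeDataName and nameRegexp above: the original size becomes an 11-character, URL-safe, unpadded Base64 suffix (8 bytes of data) placed before the compression extension. The little-endian byte order below is an assumption for illustration only; the regexp only fixes the 8-byte/11-character shape.

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// sizeSuffix encodes an int64 size as 11 URL-safe Base64 characters,
// matching the [A-Za-z0-9-_]{11} group in nameRegexp.
func sizeSuffix(size int64) string {
	var b [8]byte
	binary.LittleEndian.PutUint64(b[:], uint64(size)) // byte order assumed
	return base64.RawURLEncoding.EncodeToString(b[:])
}

func main() {
	// e.g. "file.txt" with an original size of 1234 bytes, gzip compressed
	fmt.Println("file.txt." + sizeSuffix(1234) + ".gz")
}
```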
@@ -379,7 +304,7 @@ func (f *Fs) dataName(remote string, size int64, compressed bool) (name string)
// addData parses an object and adds it to the DirEntries
func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
origFileName, _, size, err := processFileName(o.Remote(), f.modeHandler)
origFileName, _, size, err := processFileName(o.Remote())
if err != nil {
fs.Errorf(o, "Error on parsing file name: %v", err)
return
@@ -502,12 +427,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err)
}
size, err := f.modeHandler.newObjectGetOriginalSize(meta)
if err != nil {
return nil, fmt.Errorf("error reading metadata: %w", err)
}
// Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, size, meta.Mode))
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
if err != nil {
return nil, err
}
@@ -516,7 +437,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// checkCompressAndType checks if an object is compressible and determines its mime type
// returns a multireader with the bytes that were read to determine mime type
func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compressionModeHandler) (newReader io.Reader, compressible bool, mimeType string, err error) {
func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool, mimeType string, err error) {
in, wrap := accounting.UnWrap(in)
buf := make([]byte, heuristicBytes)
n, err := in.Read(buf)
@@ -525,7 +446,7 @@ func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compres
return nil, false, "", err
}
mime := mimetype.Detect(buf)
compressible, err = modeHandler.isCompressible(bytes.NewReader(buf), compressionMode)
compressible, err = isCompressible(bytes.NewReader(buf))
if err != nil {
return nil, false, "", err
}
@@ -533,6 +454,26 @@ func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compres
return wrap(in), compressible, mime.String(), nil
}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func isCompressible(r io.Reader) (bool, error) {
var b bytes.Buffer
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
if err != nil {
return false, err
}
n, err := io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
// verifyObjectHash verifies the Objects hash
func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
srcHash := hasher.Sums()[ht]
@@ -553,9 +494,9 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
type compressionResult[T sgzip.GzipMetadata | SzstdMetadata] struct {
type compressionResult struct {
err error
meta T
meta sgzip.GzipMetadata
}
// replicating some of operations.Rcat functionality because we want to support remotes without streaming
@@ -596,18 +537,106 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, fmt.Errorf("failed to seek temporary local file: %w", err)
return nil, err
}
finfo, err := tempFile.Stat()
if err != nil {
return nil, fmt.Errorf("failed to stat temporary local file: %w", err)
return nil, err
}
return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
}
// Put a compressed version of a file. Returns a wrappable object and metadata.
func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
return f.modeHandler.putCompress(ctx, f, in, src, options, mimeType)
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)
// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)
// Compress the file
pipeReader, pipeWriter := io.Pipe()
results := make(chan compressionResult)
go func() {
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
if err != nil {
results <- compressionResult{err: err, meta: sgzip.GzipMetadata{}}
return
}
_, err = io.Copy(gz, in)
gzErr := gz.Close()
if gzErr != nil {
fs.Errorf(nil, "Failed to close compress: %v", gzErr)
if err == nil {
err = gzErr
}
}
closeErr := pipeWriter.Close()
if closeErr != nil {
fs.Errorf(nil, "Failed to close pipe: %v", closeErr)
if err == nil {
err = closeErr
}
}
results <- compressionResult{err: err, meta: gz.MetaData()}
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
// Find a hash the destination supports to compute a hash of
// the compressed data.
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
// unwrap the accounting again
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
// add the hasher and re-wrap the accounting
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
if removeErr != nil {
fs.Errorf(o, "Failed to remove partially transferred object: %v", err)
}
}
return nil, nil, err
}
// Check whether we got an error during compression
result := <-results
err = result.err
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
if removeErr != nil {
fs.Errorf(o, "Failed to remove partially compressed object: %v", err)
}
}
return nil, nil, err
}
// Generate metadata
meta := newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
// Check the hashes of the compressed data if we were comparing them
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
}
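putCompress above compresses through an io.Pipe in a goroutine while the upload side reads the other end, reporting the compressor's outcome over a channel. A simplified, self-contained sketch of that pattern, using the standard library gzip writer instead of sgzip purely for illustration:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// compressWhileReading compresses src on one side of an io.Pipe while the
// "upload" side drains the other, and returns the compressor's error.
func compressWhileReading(src io.Reader, sink io.Writer) error {
	pr, pw := io.Pipe()
	errc := make(chan error, 1)

	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if cerr := gz.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // unblocks the reader if compression failed
		errc <- err
	}()

	if _, err := io.Copy(sink, pr); err != nil {
		return err
	}
	return <-errc
}

func main() {
	err := compressWhileReading(strings.NewReader("hello hello hello"), io.Discard)
	fmt.Println("err:", err)
}
```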
// Put an uncompressed version of a file. Returns a wrappable object and metadata.
@@ -651,8 +680,7 @@ func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil {
return nil, nil, err
}
return f.modeHandler.putUncompressGetNewMetadata(o, Uncompressed, hex.EncodeToString(sum), mimeType, sum)
return o, newMetadata(o.Size(), Uncompressed, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
}
// This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object.
@@ -723,7 +751,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
o, err := f.NewObject(ctx, src.Remote())
if err == fs.ErrorObjectNotFound {
// Get our file compressibility
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
in, compressible, mimeType, err := checkCompressAndType(in)
if err != nil {
return nil, err
}
@@ -743,7 +771,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
found := err == nil
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
in, compressible, mimeType, err := checkCompressAndType(in)
if err != nil {
return nil, err
}
@@ -1062,12 +1090,11 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration
// ObjectMetadata describes the metadata for an Object.
type ObjectMetadata struct {
Mode int // Compression mode of the file.
Size int64 // Size of the object.
MD5 string // MD5 hash of the file.
MimeType string // Mime type of the file
CompressionMetadataGzip *sgzip.GzipMetadata // Metadata for Gzip compression
CompressionMetadataZstd *SzstdMetadata // Metadata for Zstd compression
Mode int // Compression mode of the file.
Size int64 // Size of the object.
MD5 string // MD5 hash of the file.
MimeType string // Mime type of the file
CompressionMetadata sgzip.GzipMetadata
}
// Object with external metadata
@@ -1080,6 +1107,17 @@ type Object struct {
meta *ObjectMetadata // Metadata struct for this object (nil if not loaded)
}
// This function generates a metadata object
func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mimeType string) *ObjectMetadata {
meta := new(ObjectMetadata)
meta.Size = size
meta.Mode = mode
meta.CompressionMetadata = cmeta
meta.MD5 = md5
meta.MimeType = mimeType
return meta
}
// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
// Open our metadata object
@@ -1127,7 +1165,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.mo, o.mo.Update(ctx, in, src, options...)
}
in, compressible, mimeType, err := checkCompressAndType(in, o.meta.Mode, o.f.modeHandler)
in, compressible, mimeType, err := checkCompressAndType(in)
if err != nil {
return err
}
@@ -1240,7 +1278,7 @@ func (o *Object) String() string {
// Remote returns the remote path
func (o *Object) Remote() string {
origFileName, _, _, err := processFileName(o.Object.Remote(), o.f.modeHandler)
origFileName, _, _, err := processFileName(o.Object.Remote())
if err != nil {
fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
return o.Object.Remote()
@@ -1343,6 +1381,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
return o.Object.Open(ctx, options...)
}
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
@@ -1350,12 +1389,31 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
openOptions = append(openOptions, option)
}
}
// Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
var retCloser io.Closer = chunkedReader
return o.f.modeHandler.openGetReadCloser(ctx, o, offset, limit, chunkedReader, retCloser, options...)
// Get file handle
var file io.Reader
if offset != 0 {
file, err = sgzip.NewReaderAt(chunkedReader, &o.meta.CompressionMetadata, offset)
} else {
file, err = sgzip.NewReader(chunkedReader)
}
if err != nil {
return nil, err
}
var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: chunkedReader}, nil
}
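The Open variant restored above folds fs.SeekOption and fs.RangeOption into an (offset, limit) pair before handing the stream to the chunked reader. Extracted as a standalone helper for clarity (a sketch mirroring the hunk, assuming the rclone fs package import):

```go
// decodeOpenRange collapses open options into (offset, limit), where
// limit == -1 means "read to the end", exactly as the loop in Open does.
func decodeOpenRange(size int64, options ...fs.OpenOption) (offset, limit int64) {
	offset, limit = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(size)
		}
	}
	return offset, limit
}
```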
// ObjectInfo describes a wrapped fs.ObjectInfo for being the source

View File

@@ -48,27 +48,7 @@ func TestRemoteGzip(t *testing.T) {
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "mode", Value: "gzip"},
{Name: name, Key: "level", Value: "-1"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
}
// TestRemoteZstd tests ZSTD compression
func TestRemoteZstd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
name := "TestCompressZstd"
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "mode", Value: "zstd"},
{Name: name, Key: "level", Value: "2"},
{Name: name, Key: "compression_mode", Value: "gzip"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)

View File

@@ -1,207 +0,0 @@
package compress
import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"errors"
"io"
"github.com/buengese/sgzip"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
"github.com/rclone/rclone/fs/hash"
)
// gzipModeHandler implements compressionModeHandler for gzip
type gzipModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
var b bytes.Buffer
var n int64
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
if err != nil {
return false, err
}
n, err = io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
if meta.CompressionMetadataGzip == nil {
return 0, errors.New("missing gzip metadata")
}
return meta.CompressionMetadataGzip.Size, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (g *gzipModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
var file io.Reader
if offset != 0 {
file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
} else {
file, err = sgzip.NewReader(cr)
}
if err != nil {
return nil, err
}
var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
if compressionMode == Gzip {
return gzFileExt
}
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (g *gzipModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)
// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)
// Compress the file
pipeReader, pipeWriter := io.Pipe()
resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
go func() {
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
if err != nil {
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
close(resultsGzip)
return
}
_, err = io.Copy(gz, in)
gzErr := gz.Close()
if gzErr != nil && err == nil {
err = gzErr
}
closeErr := pipeWriter.Close()
if closeErr != nil && err == nil {
err = closeErr
}
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
close(resultsGzip)
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering
// Find a hash the destination supports to compute a hash of
// the compressed data.
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
// unwrap the accounting again
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
// add the hasher and re-wrap the accounting
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
if err != nil {
if o != nil {
if removeErr := o.Remove(ctx); removeErr != nil {
fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
}
}
return nil, nil, err
}
// Check whether we got an error during compression
result := <-resultsGzip
if result.err != nil {
if o != nil {
if removeErr := o.Remove(ctx); removeErr != nil {
fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
}
}
return nil, nil, result.err
}
// Generate metadata
meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
// Check the hashes of the compressed data if we were comparing them
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
}
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
meta, ok := cmeta.(sgzip.GzipMetadata)
if !ok {
panic("invalid cmeta type: expected sgzip.GzipMetadata")
}
objMeta := new(ObjectMetadata)
objMeta.Size = size
objMeta.Mode = mode
objMeta.CompressionMetadataGzip = &meta
objMeta.CompressionMetadataZstd = nil
objMeta.MD5 = md5
objMeta.MimeType = mimeType
return objMeta
}

View File

@@ -1,327 +0,0 @@
package compress
import (
"context"
"errors"
"io"
"runtime"
"sync"
szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
"github.com/klauspost/compress/zstd"
)
const szstdChunkSize int = 1 << 20 // 1 MiB chunk size
// SzstdMetadata holds metadata for szstd compressed files.
type SzstdMetadata struct {
BlockSize int // BlockSize is the size of the blocks in the zstd file
Size int64 // Size is the uncompressed size of the file
BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
}
// SzstdWriter is a writer that compresses data in szstd format.
type SzstdWriter struct {
enc *zstd.Encoder
w szstd.ConcurrentWriter
metadata SzstdMetadata
mu sync.Mutex
}
// NewWriterSzstd creates a new szstd writer with the specified options.
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
encoder, err := zstd.NewWriter(nil, opts...)
if err != nil {
return nil, err
}
sw, err := szstd.NewWriter(w, encoder)
if err != nil {
if err := encoder.Close(); err != nil {
return nil, err
}
return nil, err
}
return &SzstdWriter{
enc: encoder,
w: sw,
metadata: SzstdMetadata{
BlockSize: szstdChunkSize,
Size: 0,
},
}, nil
}
// Write writes data to the szstd writer in chunks of szstdChunkSize.
// It handles the block size and metadata updates automatically.
func (w *SzstdWriter) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
if w.metadata.BlockData == nil {
numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
w.metadata.BlockData[0] = 0
}
start := 0
total := len(p)
var writerFunc szstd.FrameSource = func() ([]byte, error) {
if start >= total {
return nil, nil
}
end := min(start+w.metadata.BlockSize, total)
chunk := p[start:end]
size := end - start
w.mu.Lock()
w.metadata.Size += int64(size)
w.mu.Unlock()
start = end
return chunk, nil
}
// write sizes of compressed blocks in the callback
err := w.w.WriteMany(context.Background(), writerFunc,
szstd.WithWriteCallback(func(size uint32) {
w.mu.Lock()
lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
w.mu.Unlock()
}),
)
if err != nil {
return 0, err
}
return total, nil
}
// Close closes the SzstdWriter and its underlying encoder.
func (w *SzstdWriter) Close() error {
if err := w.w.Close(); err != nil {
return err
}
if err := w.enc.Close(); err != nil {
return err
}
return nil
}
// GetMetadata returns the metadata of the szstd writer.
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
return w.metadata
}
// SzstdReaderAt is a reader that allows random access in szstd compressed data.
type SzstdReaderAt struct {
r szstd.Reader
decoder *zstd.Decoder
metadata *SzstdMetadata
pos int64
mu sync.Mutex
}
// NewReaderAtSzstd creates a new SzstdReaderAt at the specified io.ReadSeeker.
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
decoder, err := zstd.NewReader(nil, opts...)
if err != nil {
return nil, err
}
r, err := szstd.NewReader(rs, decoder)
if err != nil {
decoder.Close()
return nil, err
}
sr := &SzstdReaderAt{
r: r,
decoder: decoder,
metadata: meta,
pos: 0,
}
// Set initial position to the provided offset
if _, err := sr.Seek(offset, io.SeekStart); err != nil {
if err := sr.Close(); err != nil {
return nil, err
}
return nil, err
}
return sr, nil
}
// Seek sets the offset for the next Read.
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
s.mu.Lock()
defer s.mu.Unlock()
pos, err := s.r.Seek(offset, whence)
if err == nil {
s.pos = pos
}
return pos, err
}
func (s *SzstdReaderAt) Read(p []byte) (int, error) {
s.mu.Lock()
defer s.mu.Unlock()
n, err := s.r.Read(p)
if err == nil {
s.pos += int64(n)
}
return n, err
}
// ReadAt reads data at the specified offset.
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
if off < 0 {
return 0, errors.New("invalid offset")
}
if off >= s.metadata.Size {
return 0, io.EOF
}
endOff := min(off+int64(len(p)), s.metadata.Size)
// Find all blocks covered by the range
type blockInfo struct {
index int // Block index
offsetInBlock int64 // Offset within the block for starting reading
bytesToRead int64 // How many bytes to read from this block
}
var blocks []blockInfo
uncompressedOffset := int64(0)
currentOff := off
for i := 0; i < len(s.metadata.BlockData)-1; i++ {
blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)
if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
offsetInBlock := max(0, currentOff-uncompressedOffset)
bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)
blocks = append(blocks, blockInfo{
index: i,
offsetInBlock: offsetInBlock,
bytesToRead: bytesToRead,
})
currentOff += bytesToRead
if currentOff >= endOff {
break
}
}
uncompressedOffset = blockUncompressedEnd
}
if len(blocks) == 0 {
return 0, io.EOF
}
// Parallel block decoding
type decodeResult struct {
index int
data []byte
err error
}
resultCh := make(chan decodeResult, len(blocks))
var wg sync.WaitGroup
sem := make(chan struct{}, runtime.NumCPU())
for _, block := range blocks {
wg.Add(1)
go func(block blockInfo) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
startOffset := int64(s.metadata.BlockData[block.index])
endOffset := int64(s.metadata.BlockData[block.index+1])
compressedSize := endOffset - startOffset
compressed := make([]byte, compressedSize)
_, err := s.r.ReadAt(compressed, startOffset)
if err != nil && err != io.EOF {
resultCh <- decodeResult{index: block.index, err: err}
return
}
decoded, err := s.decoder.DecodeAll(compressed, nil)
if err != nil {
resultCh <- decodeResult{index: block.index, err: err}
return
}
resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
}(block)
}
go func() {
wg.Wait()
close(resultCh)
}()
// Collect results in block index order
totalRead := 0
results := make(map[int]decodeResult)
expected := len(blocks)
minIndex := blocks[0].index
for res := range resultCh {
results[res.index] = res
for {
if result, ok := results[minIndex]; ok {
if result.err != nil {
return 0, result.err
}
// find the corresponding blockInfo
var blk blockInfo
for _, b := range blocks {
if b.index == result.index {
blk = b
break
}
}
start := blk.offsetInBlock
end := start + blk.bytesToRead
copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
totalRead += int(blk.bytesToRead)
minIndex++
if minIndex-blocks[0].index >= len(blocks) {
break
}
} else {
break
}
}
if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
break
}
}
return totalRead, nil
}
// Close closes the SzstdReaderAt and underlying decoder.
func (s *SzstdReaderAt) Close() error {
if err := s.r.Close(); err != nil {
return err
}
s.decoder.Close()
return nil
}
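A hedged usage sketch for the removed seekable-zstd helpers in this file, assuming it sits in the same package (and that bytes and io are imported): data is written through the seekable writer, the metadata it accumulates is kept, and the compressed bytes are then reopened for random-access reads.

```go
// roundTripSzstd compresses data with the seekable writer, then reads it
// back through the random-access reader using the recorded block metadata.
func roundTripSzstd(data []byte) ([]byte, error) {
	var buf bytes.Buffer
	w, err := NewWriterSzstd(&buf)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	meta := w.GetMetadata() // block offsets are required for ReadAt

	r, err := NewReaderAtSzstd(bytes.NewReader(buf.Bytes()), &meta, 0)
	if err != nil {
		return nil, err
	}
	defer func() { _ = r.Close() }()

	out := make([]byte, len(data))
	if _, err := r.ReadAt(out, 0); err != nil && err != io.EOF {
		return nil, err
	}
	return out, nil
}
```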

View File

@@ -1,65 +0,0 @@
package compress
import (
"context"
"fmt"
"io"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunkedreader"
)
// uncompressedModeHandler implements compressionModeHandler for uncompressed files
type uncompressedModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
return false, nil
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
return 0, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (u *uncompressedModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
return o.Object.Open(ctx, options...)
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (u *uncompressedModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
}
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
return nil
}

View File

@@ -1,65 +0,0 @@
package compress
import (
"context"
"fmt"
"io"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunkedreader"
)
// unknownModeHandler implements compressionModeHandler for unknown compression types
type unknownModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
return false, fmt.Errorf("unknown compression mode %d", compressionMode)
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
return 0, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (unk *unknownModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (unk *unknownModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return nil, nil, fmt.Errorf("unknown compression mode")
}
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
return nil
}

View File

@@ -1,192 +0,0 @@
package compress
import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"errors"
"io"
"github.com/klauspost/compress/zstd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
"github.com/rclone/rclone/fs/hash"
)
// zstdModeHandler implements compressionModeHandler for zstd
type zstdModeHandler struct{}
// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
var b bytes.Buffer
var n int64
w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
if err != nil {
return false, err
}
n, err = io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
// newObjectGetOriginalSize returns the original file size from the metadata
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
if meta.CompressionMetadataZstd == nil {
return 0, errors.New("missing zstd metadata")
}
return meta.CompressionMetadataZstd.Size, nil
}
// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (z *zstdModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
var file io.Reader
if offset != 0 {
file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
} else {
file, err = zstd.NewReader(cr)
}
if err != nil {
return nil, err
}
var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}
// processFileNameGetFileExtension returns the file extension for the given compression mode
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
if compressionMode == Zstd {
return zstdFileExt
}
return ""
}
// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (z *zstdModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)
// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)
// Compress the file
pipeReader, pipeWriter := io.Pipe()
resultsZstd := make(chan compressionResult[SzstdMetadata])
go func() {
writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
if err != nil {
resultsZstd <- compressionResult[SzstdMetadata]{err: err}
close(resultsZstd)
return
}
_, err = io.Copy(writer, in)
if wErr := writer.Close(); wErr != nil && err == nil {
err = wErr
}
if cErr := pipeWriter.Close(); cErr != nil && err == nil {
err = cErr
}
resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
close(resultsZstd)
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
if err != nil {
return nil, nil, err
}
result := <-resultsZstd
if result.err != nil {
if o != nil {
_ = o.Remove(ctx)
}
return nil, nil, result.err
}
// Build metadata using uncompressed size for filename
meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
}
// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
}
// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
meta, ok := cmeta.(SzstdMetadata)
if !ok {
panic("invalid cmeta type: expected SzstdMetadata")
}
objMeta := new(ObjectMetadata)
objMeta.Size = size
objMeta.Mode = mode
objMeta.CompressionMetadataGzip = nil
objMeta.CompressionMetadataZstd = &meta
objMeta.MD5 = md5
objMeta.MimeType = mimeType
return objMeta
}
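putCompress above compresses on the fly: a goroutine writes the source through the zstd encoder into an io.Pipe, the read side is streamed to the upload, and the encoder's result comes back over a channel. Below is a minimal, hedged sketch of that pipe-and-goroutine shape using klauspost's zstd encoder directly — rclone's NewWriterSzstd wrapper, accounting and rcat upload are assumed here and replaced by a plain io.Writer.

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"

	"github.com/klauspost/compress/zstd"
)

// compressTo streams src through a zstd encoder into dst while hashing the
// uncompressed bytes, mirroring the shape of putCompress above (without
// rclone's accounting, hashing of the compressed stream, or metadata plumbing).
func compressTo(dst io.Writer, src io.Reader) (md5sum string, err error) {
	hasher := md5.New()
	src = io.TeeReader(src, hasher) // hash the plaintext as it is read

	pr, pw := io.Pipe()
	errc := make(chan error, 1)
	go func() {
		enc, err := zstd.NewWriter(pw, zstd.WithEncoderLevel(zstd.SpeedDefault))
		if err != nil {
			errc <- err
			_ = pw.CloseWithError(err)
			return
		}
		_, err = io.Copy(enc, src)
		if cerr := enc.Close(); err == nil {
			err = cerr
		}
		_ = pw.CloseWithError(err)
		errc <- err
	}()

	// The "upload" side just drains the pipe; putCompress hands it to f.rcat.
	if _, err := io.Copy(dst, pr); err != nil {
		_ = pr.CloseWithError(err) // unblock the compressor goroutine
		<-errc
		return "", err
	}
	if err := <-errc; err != nil {
		return "", err
	}
	return hex.EncodeToString(hasher.Sum(nil)), nil
}

func main() {
	var compressed strings.Builder
	sum, err := compressTo(&compressed, strings.NewReader("hello hello hello hello"))
	fmt.Println(sum, len(compressed.String()), err)
}
```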

View File

@@ -923,30 +923,28 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
var commandHelp = []fs.CommandHelp{
{
Name: "encode",
Short: "Encode the given filename(s).",
Short: "Encode the given filename(s)",
Long: `This encodes the filenames given as arguments returning a list of
strings of the encoded results.
Usage examples:
Usage Example:
` + "```console" + `
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
` + "```",
rclone backend encode crypt: file1 [file2...]
rclone rc backend/command command=encode fs=crypt: file1 [file2...]
`,
},
{
Name: "decode",
Short: "Decode the given filename(s).",
Short: "Decode the given filename(s)",
Long: `This decodes the filenames given as arguments returning a list of
strings of the decoded results. It will return an error if any of the
inputs are invalid.
Usage examples:
Usage Example:
` + "```console" + `
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
` + "```",
rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
`,
},
}

View File

@@ -563,26 +563,21 @@ var commandHelp = []fs.CommandHelp{{
Short: "Show metadata about the DOI.",
Long: `This command returns a JSON object with some information about the DOI.
Usage example:
    rclone backend metadata doi:
` + "```console" + `
rclone backend metadata doi:
` + "```" + `
It returns a JSON object representing metadata about the DOI.`,
It returns a JSON object representing metadata about the DOI.
`,
}, {
Name: "set",
Short: "Set command for updating the config parameters.",
Long: `This set command can be used to update the config parameters
for a running doi backend.
Usage examples:
Usage Examples:
` + "```console" + `
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
` + "```" + `
rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
The option keys are named as they are in the config file.
@@ -590,7 +585,8 @@ This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.`,
It doesn't return anything.
`,
}}
// Command the backend to run a named command
@@ -602,7 +598,7 @@ It doesn't return anything.`,
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "metadata":
return f.ShowMetadata(ctx)
@@ -629,7 +625,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
// ShowMetadata returns some metadata about the corresponding DOI
func (f *Fs) ShowMetadata(ctx context.Context) (metadata any, err error) {
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
if err != nil {
return nil, err

View File

@@ -18,7 +18,7 @@ type headerLink struct {
}
func parseLinkHeader(header string) (links []headerLink) {
for link := range strings.SplitSeq(header, ",") {
for _, link := range strings.Split(header, ",") {
link = strings.TrimSpace(link)
parsed := parseLink(link)
if parsed != nil {
@@ -30,7 +30,7 @@ func parseLinkHeader(header string) (links []headerLink) {
func parseLink(link string) (parsedLink *headerLink) {
var parts []string
for part := range strings.SplitSeq(link, ";") {
for _, part := range strings.Split(link, ";") {
parts = append(parts, strings.TrimSpace(part))
}
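The hunks in this file (and the drive ones further down) toggle between `strings.Split`, which allocates a slice to range over, and the iterator-based `strings.SplitSeq` added in Go 1.24. A small sketch of the two spellings, assuming a Go 1.24+ toolchain for the SplitSeq form; the header value is illustrative.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	header := `<https://example.org/page2>; rel="next", <https://example.org/page9>; rel="last"`

	// Pre-1.24 form: Split allocates a []string up front.
	for _, link := range strings.Split(header, ",") {
		fmt.Println("slice:", strings.TrimSpace(link))
	}

	// Go 1.24 form: SplitSeq yields substrings lazily via an iterator,
	// avoiding the intermediate slice.
	for link := range strings.SplitSeq(header, ",") {
		fmt.Println("seq:  ", strings.TrimSpace(link))
	}
}
```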

View File

@@ -191,7 +191,7 @@ func driveScopes(scopesString string) (scopes []string) {
if scopesString == "" {
scopesString = defaultScope
}
for scope := range strings.SplitSeq(scopesString, ",") {
for _, scope := range strings.Split(scopesString, ",") {
scope = strings.TrimSpace(scope)
scopes = append(scopes, scopePrefix+scope)
}
@@ -1220,7 +1220,7 @@ func isLinkMimeType(mimeType string) bool {
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
for _, extensionText := range extensionsIn {
for extension := range strings.SplitSeq(extensionText, ",") {
for _, extension := range strings.Split(extensionText, ",") {
extension = strings.ToLower(strings.TrimSpace(extension))
if extension == "" {
continue
@@ -1965,28 +1965,9 @@ func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
entriesAdded := 0
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
return nil, err
}
directoryID = actualID(directoryID)
@@ -1998,30 +1979,25 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
return true
}
if entry != nil {
err = list.Add(entry)
if err != nil {
iErr = err
return true
}
entriesAdded++
entries = append(entries, entry)
}
return false
})
if err != nil {
return err
return nil, err
}
if iErr != nil {
return iErr
return nil, iErr
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && entriesAdded == 0 && f.root == "" && dir == "" {
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
return nil, err
}
}
return list.Flush()
return entries, nil
}
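The List/ListP hunks above switch the drive backend between returning one fs.DirEntries slice and streaming tranches through a callback, with `list.WithListP` recovering the slice-returning List from a ListP implementation. Its exact code is not shown here; conceptually it accumulates the callback tranches into a single slice, roughly as in this hedged sketch (collectListP and the package name are illustrative, not rclone API).

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// collectListP shows, in outline, how a slice-returning List can be built on
// top of a callback-based ListP — the role list.WithListP plays above. This is
// a sketch, not the actual rclone implementation.
func collectListP(ctx context.Context, dir string,
	listP func(ctx context.Context, dir string, callback fs.ListRCallback) error,
) (fs.DirEntries, error) {
	var entries fs.DirEntries
	err := listP(ctx, dir, func(tranche fs.DirEntries) error {
		// Each callback delivers a tranche of entries; accumulate them all.
		entries = append(entries, tranche...)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return entries, nil
}
```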
// listREntry is a task to be executed by a listRRunner
@@ -3664,47 +3640,41 @@ func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error)
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters.",
Long: `This is a get command which will be used to fetch the various drive config
parameters.
Short: "Get command for fetching the drive config parameters",
Long: `This is a get command which will be used to fetch the various drive config parameters
Usage examples:
Usage Examples:
` + "```console" + `
rclone backend get drive: [-o service_account_file] [-o chunk_size]
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
` + "```",
rclone backend get drive: [-o service_account_file] [-o chunk_size]
rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
`,
Opts: map[string]string{
"chunk_size": "Show the current upload chunk size.",
"service_account_file": "Show the current service account file.",
"chunk_size": "show the current upload chunk size",
"service_account_file": "show the current service account file",
},
}, {
Name: "set",
Short: "Set command for updating the drive config parameters.",
Long: `This is a set command which will be used to update the various drive config
parameters.
Short: "Set command for updating the drive config parameters",
Long: `This is a set command which will be used to update the various drive config parameters
Usage examples:
Usage Examples:
` + "```console" + `
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
` + "```",
rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
`,
Opts: map[string]string{
"chunk_size": "Update the current upload chunk size.",
"service_account_file": "Update the current service account file.",
"chunk_size": "update the current upload chunk size",
"service_account_file": "update the current service account file",
},
}, {
Name: "shortcut",
Short: "Create shortcuts from files or directories.",
Short: "Create shortcuts from files or directories",
Long: `This command creates shortcuts from files or directories.
Usage examples:
Usage:
` + "```console" + `
rclone backend shortcut drive: source_item destination_shortcut
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
` + "```" + `
rclone backend shortcut drive: source_item destination_shortcut
rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The
@@ -3714,100 +3684,90 @@ from "drive:"
In the second example this creates a shortcut from the "source_item"
relative to "drive:" to the "destination_shortcut" relative to
"drive2:". This may fail with a permission error if the user
authenticated with "drive2:" can't read files from "drive:".`,
authenticated with "drive2:" can't read files from "drive:".
`,
Opts: map[string]string{
"target": "Optional target remote for the shortcut destination.",
"target": "optional target remote for the shortcut destination",
},
}, {
Name: "drives",
Short: "List the Shared Drives available to this account.",
Short: "List the Shared Drives available to this account",
Long: `This command lists the Shared Drives (Team Drives) available to this
account.
Usage example:
Usage:
` + "```console" + `
rclone backend [-o config] drives drive:
` + "```" + `
rclone backend [-o config] drives drive:
This will return a JSON list of objects like this:
This will return a JSON list of objects like this
` + "```json" + `
[
{
"id": "0ABCDEF-01234567890",
"kind": "drive#teamDrive",
"name": "My Drive"
},
{
"id": "0ABCDEFabcdefghijkl",
"kind": "drive#teamDrive",
"name": "Test Drive"
}
]
` + "```" + `
[
{
"id": "0ABCDEF-01234567890",
"kind": "drive#teamDrive",
"name": "My Drive"
},
{
"id": "0ABCDEFabcdefghijkl",
"kind": "drive#teamDrive",
"name": "Test Drive"
}
]
With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found and a combined drive.
` + "```ini" + `
[My Drive]
type = alias
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
[My Drive]
type = alias
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
[Test Drive]
type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
[Test Drive]
type = alias
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
[AllDrives]
type = combine
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
` + "```" + `
[AllDrives]
type = combine
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.`,
drives combined into one directory tree.
`,
}, {
Name: "untrash",
Short: "Untrash files and directories.",
Short: "Untrash files and directories",
Long: `This command untrashes all the files and directories in the directory
passed in recursively.
Usage example:
` + "```console" + `
rclone backend untrash drive:directory
rclone backend --interactive untrash drive:directory subdir
` + "```" + `
Usage:
This takes an optional directory to trash which makes this easier to
use via the API.
Use the --interactive/-i or --dry-run flag to see what would be restored before
restoring it.
rclone backend untrash drive:directory
rclone backend --interactive untrash drive:directory subdir
Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
Result:
` + "```json" + `
{
"Untrashed": 17,
"Errors": 0
}
` + "```",
{
"Untrashed": 17,
"Errors": 0
}
`,
}, {
Name: "copyid",
Short: "Copy files by ID.",
Long: `This command copies files by ID.
Short: "Copy files by ID",
Long: `This command copies files by ID
Usage examples:
Usage:
` + "```console" + `
rclone backend copyid drive: ID path
rclone backend copyid drive: ID1 path1 ID2 path2
` + "```" + `
rclone backend copyid drive: ID path
rclone backend copyid drive: ID1 path1 ID2 path2
It copies the drive file with ID given to the path (an rclone path which
will be passed internally to rclone copyto). The ID and path pairs can be
@@ -3820,19 +3780,17 @@ component will be used as the file name.
If the destination is a drive backend then server-side copying will be
attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be copied before
copying.`,
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
Name: "moveid",
Short: "Move files by ID.",
Long: `This command moves files by ID.
Short: "Move files by ID",
Long: `This command moves files by ID
Usage examples:
Usage:
` + "```console" + `
rclone backend moveid drive: ID path
rclone backend moveid drive: ID1 path1 ID2 path2
` + "```" + `
rclone backend moveid drive: ID path
rclone backend moveid drive: ID1 path1 ID2 path2
It moves the drive file with ID given to the path (an rclone path which
will be passed internally to rclone moveto).
@@ -3844,65 +3802,58 @@ component will be used as the file name.
If the destination is a drive backend then server-side moving will be
attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.`,
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
`,
}, {
Name: "exportformats",
Short: "Dump the export formats for debug purposes.",
Short: "Dump the export formats for debug purposes",
}, {
Name: "importformats",
Short: "Dump the import formats for debug purposes.",
Short: "Dump the import formats for debug purposes",
}, {
Name: "query",
Short: "List files using Google Drive query language.",
Long: `This command lists files based on a query.
Short: "List files using Google Drive query language",
Long: `This command lists files based on a query
Usage example:
` + "```console" + `
rclone backend query drive: query
` + "```" + `
Usage:
rclone backend query drive: query
The query syntax is documented at [Google Drive Search query terms and
operators](https://developers.google.com/drive/api/guides/ref-search-terms).
For example:
` + "```console" + `
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
` + "```" + `
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
If the query contains literal ' or \ characters, these need to be escaped with
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
file named "foo ' \.txt":
` + "```console" + `
rclone backend query drive: "name = 'foo \' \\\.txt'"
` + "```" + `
rclone backend query drive: "name = 'foo \' \\\.txt'"
The result is a JSON array of matches, for example:
` + "```json" + `
[
{
"createdTime": "2017-06-29T19:58:28.537Z",
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
"mimeType": "text/plain",
"modifiedTime": "2024-02-02T10:40:02.874Z",
"name": "foo ' \\.txt",
"parents": [
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
],
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
"size": "311",
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
}
]
` + "```",
[
{
"createdTime": "2017-06-29T19:58:28.537Z",
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
"mimeType": "text/plain",
"modifiedTime": "2024-02-02T10:40:02.874Z",
"name": "foo ' \\.txt",
"parents": [
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
],
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
"size": "311",
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
}
]`,
}, {
Name: "rescue",
Short: "Rescue or delete any orphaned files.",
Short: "Rescue or delete any orphaned files",
Long: `This command rescues or deletes any orphaned files or directories.
Sometimes files can get orphaned in Google Drive. This means that they
@@ -3911,31 +3862,26 @@ are no longer in any folder in Google Drive.
This command finds those files and either rescues them to a directory
you specify or deletes them.
Usage:
This can be used in 3 ways.
First, list all orphaned files:
First, list all orphaned files
` + "```console" + `
rclone backend rescue drive:
` + "```" + `
rclone backend rescue drive:
Second rescue all orphaned files to the directory indicated:
Second rescue all orphaned files to the directory indicated
` + "```console" + `
rclone backend rescue drive: "relative/path/to/rescue/directory"
` + "```" + `
rclone backend rescue drive: "relative/path/to/rescue/directory"
E.g. to rescue all orphans to a directory called "Orphans" in the top level:
e.g. To rescue all orphans to a directory called "Orphans" in the top level
` + "```console" + `
rclone backend rescue drive: Orphans
` + "```" + `
rclone backend rescue drive: Orphans
Third delete all orphaned files to the trash:
Third delete all orphaned files to the trash
` + "```console" + `
rclone backend rescue drive: -o delete
` + "```",
rclone backend rescue drive: -o delete
`,
}}
// Command the backend to run a named command
@@ -4671,7 +4617,6 @@ var (
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)

View File

@@ -386,6 +386,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
g.SetLimit(o.fs.ci.Checkers)
var mu sync.Mutex // protect the info.Permissions from concurrent writes
for _, permissionID := range info.PermissionIds {
permissionID := permissionID
g.Go(func() error {
// must fetch the team drive ones individually to check the inherited flag
perm, inherited, err := o.fs.getPermission(gCtx, actualID(info.Id), permissionID, !o.fs.isTeamDrive)
@@ -519,6 +520,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
// merge metadata into request and user metadata
for k, v := range meta {
k, v := k, v
// parse a boolean from v and write into out
parseBool := func(out *bool) error {
b, err := strconv.ParseBool(v)
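The two (truncated) hunks above add or remove the classic `permissionID := permissionID` / `k, v := k, v` re-declarations. Before Go 1.22 a range loop reused one variable across iterations, so goroutines and closures had to copy it; from Go 1.22 each iteration gets a fresh variable and the copy is redundant. A standalone illustration — the described race applies to pre-1.22 semantics; on 1.22+ both forms behave the same.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	ids := []string{"a", "b", "c"}

	var wg sync.WaitGroup
	for _, id := range ids {
		id := id // redundant since Go 1.22; required before it to avoid capturing the shared loop variable
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(id) // with the copy, prints each of a, b, c exactly once (in some order)
		}()
	}
	wg.Wait()
}
```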

View File

@@ -47,7 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
@@ -835,7 +834,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// listSharedFolders lists all available shared folders, both mounted and not mounted
// we'll need the id later so we have to return them in the original format
func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
@@ -848,7 +847,7 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err)
})
if err != nil {
return err
return nil, err
}
started = true
} else {
@@ -860,15 +859,15 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("list continue: %w", err)
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
err = callback(d)
entries = append(entries, d)
if err != nil {
return err
return nil, err
}
}
if res.Cursor == "" {
@@ -876,26 +875,22 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
}
}
return nil
return entries, nil
}
// findSharedFolder finds the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
errFoundFile := errors.New("found file")
err = f.listSharedFolders(ctx, func(entry fs.DirEntry) error {
if entry.(*fs.Dir).Remote() == name {
id = entry.(*fs.Dir).ID()
return errFoundFile
}
return nil
})
if errors.Is(err, errFoundFile) {
return id, nil
} else if err != nil {
entries, err := f.listSharedFolders(ctx)
if err != nil {
return "", err
}
for _, entry := range entries {
if entry.(*fs.Dir).Remote() == name {
return entry.(*fs.Dir).ID(), nil
}
}
return "", fs.ErrorDirNotFound
}
@@ -913,7 +908,7 @@ func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
// listReceivedFiles lists shared files the user has access to (note this means individual
// files, not files contained in shared folders)
func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
@@ -926,7 +921,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err)
})
if err != nil {
return err
return nil, err
}
started = true
} else {
@@ -938,7 +933,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("list continue: %w", err)
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -951,34 +946,27 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
modTime: *entry.TimeInvited,
}
if err != nil {
return err
}
err = callback(o)
if err != nil {
return err
return nil, err
}
entries = append(entries, o)
}
if res.Cursor == "" {
break
}
}
return nil
return entries, nil
}
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
errFoundFile := errors.New("found file")
err = f.listReceivedFiles(ctx, func(entry fs.DirEntry) error {
if entry.(*Object).remote == name {
o = entry.(*Object)
return errFoundFile
}
return nil
})
if errors.Is(err, errFoundFile) {
return o, nil
} else if err != nil {
files, err := f.listReceivedFiles(ctx)
if err != nil {
return nil, err
}
for _, entry := range files {
if entry.(*Object).remote == name {
return entry.(*Object), nil
}
}
return nil, fs.ErrorObjectNotFound
}
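findSharedFolder and findSharedFile in the hunks above stop a callback-driven listing early by returning a private sentinel error from the callback and then treating it as success with errors.Is. A minimal sketch of that idiom, with a generic walk function standing in for listSharedFolders/listReceivedFiles (all names here are illustrative).

```go
package main

import (
	"errors"
	"fmt"
)

// walk calls fn for every item and stops at the first error, like the
// callback-based listers above.
func walk(items []string, fn func(string) error) error {
	for _, it := range items {
		if err := fn(it); err != nil {
			return err
		}
	}
	return nil
}

// find uses a sentinel error to abort the walk as soon as a match is seen,
// then converts that sentinel back into a successful result.
func find(items []string, want string) (string, error) {
	errFound := errors.New("found")
	var match string
	err := walk(items, func(it string) error {
		if it == want {
			match = it
			return errFound // stop walking early
		}
		return nil
	})
	if errors.Is(err, errFound) {
		return match, nil
	}
	if err != nil {
		return "", err
	}
	return "", errors.New("not found")
}

func main() {
	fmt.Println(find([]string{"docs", "photos", "music"}, "photos"))
}
```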
@@ -992,37 +980,11 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
if f.opt.SharedFiles {
err := f.listReceivedFiles(ctx, list.Add)
if err != nil {
return err
}
return list.Flush()
return f.listReceivedFiles(ctx)
}
if f.opt.SharedFolders {
err := f.listSharedFolders(ctx, list.Add)
if err != nil {
return err
}
return list.Flush()
return f.listSharedFolders(ctx)
}
root := f.slashRoot
@@ -1052,7 +1014,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
err = fs.ErrorDirNotFound
}
}
return err
return nil, err
}
started = true
} else {
@@ -1064,7 +1026,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
return shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("list continue: %w", err)
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -1089,20 +1051,14 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
err = list.Add(d)
if err != nil {
return err
}
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
if err != nil {
return err
return nil, err
}
if o.(*Object).exportType.listable() {
err = list.Add(o)
if err != nil {
return err
}
entries = append(entries, o)
}
}
}
@@ -1110,7 +1066,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
break
}
}
return list.Flush()
return entries, nil
}
// Put the object
@@ -1330,16 +1286,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
var result *files.RelocationResult
err = f.pacer.Call(func() (bool, error) {
result, err = f.srv.MoveV2(&arg)
switch e := err.(type) {
case files.MoveV2APIError:
// There seems to be a bit of eventual consistency here which causes this to
// fail on just created objects
// See: https://github.com/rclone/rclone/issues/8881
if e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
fs.Debugf(srcObj, "Retrying move on %v error", err)
return true, err
}
}
return shouldRetry(ctx, err)
})
if err != nil {
@@ -1500,9 +1446,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
}
}
usage = &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(used), // bytes in use
Free: fs.NewUsageValue(total - used), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(int64(total)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(used)), // bytes in use
Free: fs.NewUsageValue(int64(total - used)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}
@@ -2141,7 +2087,6 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)

View File

@@ -8,7 +8,7 @@ type CreateFolderResponse struct {
Status int `json:"status"`
Msg string `json:"msg"`
Result struct {
FldID any `json:"fld_id"`
FldID interface{} `json:"fld_id"`
} `json:"result"`
}

View File

@@ -14,7 +14,7 @@ import (
)
// errFileNotFound represents a file not found error
var errFileNotFound = errors.New("file not found")
var errFileNotFound error = errors.New("file not found")
// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {

View File

@@ -163,16 +163,6 @@ Enabled by default. Use 0 to disable.`,
Help: "Disable TLS 1.3 (workaround for FTP servers with buggy TLS)",
Default: false,
Advanced: true,
}, {
Name: "allow_insecure_tls_ciphers",
Help: `Allow insecure TLS ciphers
Setting this flag will allow the usage of the following TLS ciphers in addition to the secure defaults:
- TLS_RSA_WITH_AES_128_GCM_SHA256
`,
Default: false,
Advanced: true,
}, {
Name: "shut_timeout",
Help: "Maximum time to wait for data connection closing status.",
@@ -246,30 +236,29 @@ a write only folder.
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
AllowInsecureTLSCiphers bool `config:"allow_insecure_tls_ciphers"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
Host string `config:"host"`
User string `config:"user"`
Pass string `config:"pass"`
Port string `config:"port"`
TLS bool `config:"tls"`
ExplicitTLS bool `config:"explicit_tls"`
TLSCacheSize int `config:"tls_cache_size"`
DisableTLS13 bool `config:"disable_tls13"`
Concurrency int `config:"concurrency"`
SkipVerifyTLSCert bool `config:"no_check_certificate"`
DisableEPSV bool `config:"disable_epsv"`
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
AskPassword bool `config:"ask_password"`
Enc encoder.MultiEncoder `config:"encoding"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
NoCheckUpload bool `config:"no_check_upload"`
}
// Fs represents a remote FTP server
@@ -283,7 +272,6 @@ type Fs struct {
user string
pass string
dialAddr string
tlsConf *tls.Config // default TLS client config
poolMu sync.Mutex
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
@@ -409,14 +397,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
if f.tlsConf != nil {
tlsConfig = f.tlsConf.Clone()
} else {
tlsConfig = new(tls.Config)
}
tlsConfig.ServerName = f.opt.Host
if f.opt.SkipVerifyTLSCert {
tlsConfig.InsecureSkipVerify = true
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
@@ -424,14 +407,6 @@ func (f *Fs) tlsConfig() *tls.Config {
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
if f.opt.AllowInsecureTLSCiphers {
var ids []uint16
// Read default ciphers
for _, cs := range tls.CipherSuites() {
ids = append(ids, cs.ID)
}
tlsConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
}
}
return tlsConfig
}
@@ -456,7 +431,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
}
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" || f.proxyURL != nil {
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else if f.proxyURL != nil {
// We need to make the onward connection to f.opt.Host. However the FTP
// library sets the host to the proxy IP after using EPSV or PASV so we need
// to correct that here.
@@ -466,11 +443,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return nil, err
}
dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, dialAddress, f.opt.SocksProxy, baseDialer)
} else {
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
}
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}
@@ -679,7 +652,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
tlsConf: fshttp.NewTransport(ctx).TLSClientConfig,
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -1292,7 +1264,7 @@ func (f *ftpReadCloser) Close() error {
// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend, ftp.StatusRequestedFileActionOK:
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
err = nil
}
}

View File

@@ -252,9 +252,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-east4",
Help: "Northern Virginia",
}, {
Value: "us-east5",
Help: "Ohio",
}, {
Value: "us-west1",
Help: "Oregon",
@@ -763,7 +760,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -771,16 +768,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return err
return entries, err
}
// listBuckets lists the buckets
@@ -823,46 +820,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -1134,15 +1099,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
// Set the storage class for the destination object if configured
var dstObject *storage.Object
if f.opt.StorageClass != "" {
dstObject = &storage.Object{
StorageClass: f.opt.StorageClass,
}
}
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, dstObject)
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
}
@@ -1430,10 +1387,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Set the storage class from config if configured
if o.fs.opt.StorageClass != "" {
object.StorageClass = o.fs.opt.StorageClass
}
// Apply upload options
for _, option := range options {
key, value := option.Header()
@@ -1509,7 +1462,6 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -117,22 +117,16 @@ func init() {
} else {
oauthConfig.Scopes = scopesReadWrite
}
return oauthutil.ConfigOut("warning1", &oauthutil.Options{
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning1":
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning2", true, "config_warning", `Warning
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning2":
// Warn the user that rclone can no longer download photos it didnt upload from google photos
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: Due to Google policy changes rclone can now only download photos it uploaded.`)
case "warning_done":
return nil, nil
}

View File

@@ -43,42 +43,33 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
var commandHelp = []fs.CommandHelp{{
Name: "drop",
Short: "Drop cache.",
Short: "Drop cache",
Long: `Completely drop checksum cache.
Usage example:
` + "```console" + `
rclone backend drop hasher:
` + "```",
Usage Example:
rclone backend drop hasher:
`,
}, {
Name: "dump",
Short: "Dump the database.",
Long: "Dump cache records covered by the current remote.",
Short: "Dump the database",
Long: "Dump cache records covered by the current remote",
}, {
Name: "fulldump",
Short: "Full dump of the database.",
Long: "Dump all cache records in the database.",
Short: "Full dump of the database",
Long: "Dump all cache records in the database",
}, {
Name: "import",
Short: "Import a SUM file.",
Short: "Import a SUM file",
Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
Usage example:
` + "```console" + `
rclone backend import hasher:subdir md5 /path/to/sum.md5
` + "```",
Usage Example:
rclone backend import hasher:subdir md5 /path/to/sum.md5
`,
}, {
Name: "stickyimport",
Short: "Perform fast import of a SUM file.",
Short: "Perform fast import of a SUM file",
Long: `Fill hash cache from a SUM file without verifying file fingerprints.
Usage example:
` + "```console" + `
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
` + "```",
Usage Example:
rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
`,
}}
func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {

View File

@@ -371,9 +371,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, err
}
return &fs.Usage{
Total: fs.NewUsageValue(info.Capacity),
Used: fs.NewUsageValue(info.Used),
Free: fs.NewUsageValue(info.Remaining),
Total: fs.NewUsageValue(int64(info.Capacity)),
Used: fs.NewUsageValue(int64(info.Used)),
Free: fs.NewUsageValue(int64(info.Remaining)),
}, nil
}

View File

@@ -11,7 +11,6 @@ import (
"io"
"mime"
"net/http"
"net/textproto"
"net/url"
"path"
"strings"
@@ -38,10 +37,6 @@ func init() {
Description: "HTTP",
NewFs: NewFs,
CommandHelp: commandHelp,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `HTTP metadata keys are case insensitive and are always returned in lower case.`,
},
Options: []fs.Option{{
Name: "url",
Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
@@ -103,40 +98,6 @@ sizes of any files, and some files that don't exist may be in the listing.`,
fs.Register(fsi)
}
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"cache-control": {
Help: "Cache-Control header",
Type: "string",
Example: "no-cache",
},
"content-disposition": {
Help: "Content-Disposition header",
Type: "string",
Example: "inline",
},
"content-disposition-filename": {
Help: "Filename retrieved from Content-Disposition header",
Type: "string",
Example: "file.txt",
},
"content-encoding": {
Help: "Content-Encoding header",
Type: "string",
Example: "gzip",
},
"content-language": {
Help: "Content-Language header",
Type: "string",
Example: "en-US",
},
"content-type": {
Help: "Content-Type header",
Type: "string",
Example: "text/plain",
},
}
// Options defines the configuration for this backend
type Options struct {
Endpoint string `config:"url"`
@@ -165,13 +126,6 @@ type Object struct {
size int64
modTime time.Time
contentType string
// Metadata as pointers to strings as they often won't be present
contentDisposition *string // Content-Disposition: header
contentDispositionFilename *string // Filename retrieved from Content-Disposition: header
cacheControl *string // Cache-Control: header
contentEncoding *string // Content-Encoding: header
contentLanguage *string // Content-Language: header
}
// statusError returns an error if the res contained an error
@@ -323,7 +277,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
}
f.features = (&fs.Features{
ReadMetadata: true,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
@@ -476,29 +429,6 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
return names, nil
}
// parseFilename extracts the filename from a Content-Disposition header
func parseFilename(contentDisposition string) (string, error) {
// Normalize the contentDisposition to canonical MIME format
mediaType, params, err := mime.ParseMediaType(contentDisposition)
if err != nil {
return "", fmt.Errorf("failed to parse contentDisposition: %v", err)
}
// Check if the contentDisposition is an attachment
if strings.ToLower(mediaType) != "attachment" {
return "", fmt.Errorf("not an attachment: %s", mediaType)
}
// Extract the filename from the parameters
filename, ok := params["filename"]
if !ok {
return "", fmt.Errorf("filename not found in contentDisposition")
}
// Decode filename if it contains special encoding
return textproto.TrimString(filename), nil
}
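parseFilename above leans on mime.ParseMediaType to split the header into a media type and its parameters; a short usage sketch with an illustrative header value (matching the one the test server sets further down):

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// The same parsing parseFilename relies on: media type plus parameters.
	mediaType, params, err := mime.ParseMediaType(`attachment; filename="five.txt.gz"`)
	fmt.Println(mediaType, params["filename"], err)
	// prints: attachment five.txt.gz <nil>
}
```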
// Adds the configured headers to the request if any
func addHeaders(req *http.Request, opt *Options) {
for i := 0; i < len(opt.Headers); i += 2 {
@@ -647,9 +577,6 @@ func (o *Object) String() string {
// Remote the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
if o.contentDispositionFilename != nil {
return *o.contentDispositionFilename
}
return o.remote
}
@@ -707,29 +634,6 @@ func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
contentDisposition := res.Header.Get("Content-Disposition")
if contentDisposition != "" {
o.contentDisposition = &contentDisposition
}
if o.contentDisposition != nil {
var filename string
filename, err = parseFilename(*o.contentDisposition)
if err == nil && filename != "" {
o.contentDispositionFilename = &filename
}
}
cacheControl := res.Header.Get("Cache-Control")
if cacheControl != "" {
o.cacheControl = &cacheControl
}
contentEncoding := res.Header.Get("Content-Encoding")
if contentEncoding != "" {
o.contentEncoding = &contentEncoding
}
contentLanguage := res.Header.Get("Content-Language")
if contentLanguage != "" {
o.contentLanguage = &contentLanguage
}
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
@@ -818,13 +722,11 @@ var commandHelp = []fs.CommandHelp{{
Long: `This set command can be used to update the config parameters
for a running http backend.
Usage examples:
Usage Examples:
` + "```console" + `
rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: -o url=https://example.com
` + "```" + `
rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
rclone rc backend/command command=set fs=remote: -o url=https://example.com
The option keys are named as they are in the config file.
@@ -832,7 +734,8 @@ This rebuilds the connection to the http backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.
It doesn't return anything.`,
It doesn't return anything.
`,
}}
// Command the backend to run a named command
@@ -868,30 +771,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
}
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
metadata = make(fs.Metadata, 6)
if o.contentType != "" {
metadata["content-type"] = o.contentType
}
// Set system metadata
setMetadata := func(k string, v *string) {
if v == nil || *v == "" {
return
}
metadata[k] = *v
}
setMetadata("content-disposition", o.contentDisposition)
setMetadata("content-disposition-filename", o.contentDispositionFilename)
setMetadata("cache-control", o.cacheControl)
setMetadata("content-language", o.contentLanguage)
setMetadata("content-encoding", o.contentEncoding)
return metadata, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
@@ -899,5 +778,4 @@ var (
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.Commander = &Fs{}
_ fs.Metadataer = &Object{}
)

View File

@@ -60,17 +60,6 @@ func prepareServer(t *testing.T) configmap.Simple {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
// Set the content disposition header for the fifth file
// later we will check if it is set using the metadata method
if r.URL.Path == "/five.txt.gz" {
w.Header().Set("Content-Disposition", "attachment; filename=\"five.txt.gz\"")
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Content-Language", "en-US")
w.Header().Set("Content-Encoding", "gzip")
}
fileServer.ServeHTTP(w, r)
})
@@ -113,33 +102,27 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
sort.Sort(entries)
require.Equal(t, 5, len(entries))
require.Equal(t, 4, len(entries))
e := entries[0]
assert.Equal(t, "five.txt.gz", e.Remote())
assert.Equal(t, "four", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok := e.(fs.Object)
_, ok := e.(fs.Directory)
assert.True(t, ok)
e = entries[1]
assert.Equal(t, "four", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
e = entries[2]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(5+lineEndSize), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
e = entries[3]
e = entries[2]
assert.Equal(t, "three", e.Remote())
assert.Equal(t, int64(-1), e.Size())
_, ok = e.(fs.Directory)
assert.True(t, ok)
e = entries[4]
e = entries[3]
assert.Equal(t, "two.html", e.Remote())
if noSlash {
assert.Equal(t, int64(-1), e.Size())
@@ -235,23 +218,6 @@ func TestNewObjectWithLeadingSlash(t *testing.T) {
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestNewObjectWithMetadata(t *testing.T) {
f := prepare(t)
o, err := f.NewObject(context.Background(), "/five.txt.gz")
require.NoError(t, err)
assert.Equal(t, "five.txt.gz", o.Remote())
ho, ok := o.(*Object)
assert.True(t, ok)
metadata, err := ho.Metadata(context.Background())
require.NoError(t, err)
assert.Equal(t, "text/plain; charset=utf-8", metadata["content-type"])
assert.Equal(t, "attachment; filename=\"five.txt.gz\"", metadata["content-disposition"])
assert.Equal(t, "five.txt.gz", metadata["content-disposition-filename"])
assert.Equal(t, "no-cache", metadata["cache-control"])
assert.Equal(t, "en-US", metadata["content-language"])
assert.Equal(t, "gzip", metadata["content-encoding"])
}
func TestOpen(t *testing.T) {
m := prepareServer(t)

Binary file not shown.

View File

@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", err
}
bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
}
// Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
"x-archive-auto-make-bucket": "1",
"x-archive-queue-derive": "0",
"x-archive-keep-old-version": "0",
"x-amz-copy-source": rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
"x-amz-copy-source": quotePath(path.Join("/", srcBucket, srcPath)),
"x-amz-metadata-directive": "COPY",
"x-archive-filemeta-sha1": srcObj.sha1,
"x-archive-filemeta-md5": srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{
Method: "GET",
Path: path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
Options: optionsFixed,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,6 +1334,16 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
for _, v := range seg {
newValues = append(newValues, url.PathEscape(v))
}
return strings.Join(newValues, "/")
}
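A minimal, self-contained sketch contrasting the quotePath helper above with plain url.PathEscape (the example path is made up):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// quotePath escapes each path segment but keeps the "/" separators, as above.
func quotePath(s string) string {
	segs := strings.Split(s, "/")
	for i, v := range segs {
		segs[i] = url.PathEscape(v)
	}
	return strings.Join(segs, "/")
}

func main() {
	p := "dir name/file #1.txt"
	fmt.Println(quotePath(p))      // dir%20name/file%20%231.txt
	fmt.Println(url.PathEscape(p)) // dir%20name%2Ffile%20%231.txt ("/" gets escaped too)
}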
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}

View File

@@ -17,7 +17,6 @@ import (
"net/url"
"os"
"path"
"slices"
"strconv"
"strings"
"time"
@@ -60,43 +59,31 @@ const (
configVersion = 1
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices"
defaultClientID = "jottacli"
legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
onlimeCloudClientID = "desktop"
)
type service struct {
key string
name string
domain string
realm string
clientID string
scopes []string
}
// The list of services and their settings for supporting traditional OAuth.
// Please keep these in alphabetical order, but with jottacloud first.
func getServices() []service {
return []service{
{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
{"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
}
}
// Register with Fs
func init() {
// needs to be done early so we can use oauth during config
@@ -172,44 +159,36 @@ func init() {
}
// Config runs the backend configuration protocol
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
switch conf.State {
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
switch config.State {
case "":
if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" {
return nil, errors.New("not supported by this backend")
}
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
Value: "standard",
Help: `Standard authentication.
This is primarily supported by the official service, but may also be
supported by some white-label services. It is designed for command-line
applications, and you will be asked to enter a single-use personal login
token which you must manually generate from the account security settings
in the web interface of your service.`,
}, {
Value: "traditional",
Help: `Traditional authentication.
This is supported by the official service and all white-label services
that rclone knows about. You will be asked which service to connect to.
It has a limitation of only a single active authentication at a time. You
need to be on, or have access to, a machine with an internet-connected
web browser.`,
Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
}, {
Value: "legacy",
Help: `Legacy authentication.
This is no longer supported by any known services and its use is not
recommended. You will be asked for your account's username and password.`,
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia_se",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}, {
Value: "onlime",
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(conf.Result)
return fs.ConfigGoto(config.Result)
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
m.Set("configVersion", fmt.Sprint(configVersion))
return fs.ConfigInput("standard_token", "config_login_token", `Personal login token.
Generate it from the account security settings in the web interface of your
service, for the official service on https://www.jottacloud.com/web/secure.`)
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
case "standard_token":
loginToken := conf.Result
loginToken := config.Result
m.Set(configClientID, defaultClientID)
m.Set(configClientSecret, "")
@@ -224,50 +203,10 @@ service, for the official service on https://www.jottacloud.com/web/secure.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "traditional":
services := getServices()
options := make([]fs.OptionExample, 0, len(services))
for _, service := range services {
options = append(options, fs.OptionExample{
Value: service.key,
Help: service.name,
})
}
return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional",
"White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.",
options)
case "traditional_type":
services := getServices()
i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result })
if i == -1 {
return nil, fmt.Errorf("unexpected service %q", conf.Result)
}
service := services[i]
opts := rest.Opts{
Method: "GET",
RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration",
}
var wellKnown api.WellKnown
srv := rest.NewClient(fshttp.NewClient(ctx))
_, err := srv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err)
}
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, service.clientID)
m.Set(configTokenURL, wellKnown.TokenEndpoint)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: wellKnown.AuthorizationEndpoint,
TokenURL: wellKnown.TokenEndpoint,
ClientID: service.clientID,
Scopes: service.scopes,
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
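For illustration, a minimal sketch of the OpenID Connect discovery lookup this step performs, using only the standard library rather than rclone's rest client; the domain and realm are the official service's values from getServices above:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type wellKnown struct {
	AuthorizationEndpoint string `json:"authorization_endpoint"`
	TokenEndpoint         string `json:"token_endpoint"`
}

func main() {
	// Domain and realm as listed in getServices, e.g. the official service.
	domain, realm := "id.jottacloud.com", "jottacloud"
	wellKnownURL := "https://" + domain + "/auth/realms/" + realm + "/.well-known/openid-configuration"

	resp, err := http.Get(wellKnownURL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var wk wellKnown
	if err := json.NewDecoder(resp.Body).Decode(&wk); err != nil {
		panic(err)
	}
	fmt.Println(wk.AuthorizationEndpoint, wk.TokenEndpoint)
}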
case "legacy": // configure a jottacloud backend using legacy authentication
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
Rclone has its own Jottacloud API key which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine, it's recommended to create a
@@ -275,7 +214,7 @@ machine specific API key. These keys can NOT be shared between
machines.`)
case "legacy_api":
srv := rest.NewClient(fshttp.NewClient(ctx))
if conf.Result == "true" {
if config.Result == "true" {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
return nil, fmt.Errorf("failed to register device: %w", err)
@@ -284,16 +223,16 @@ machines.`)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.")
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
case "legacy_username":
m.Set(configUsername, conf.Result)
return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. This is only used in setup, it will not be stored.")
m.Set(configUsername, config.Result)
return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
case "legacy_password":
m.Set("password", conf.Result)
m.Set("password", config.Result)
m.Set("auth_code", "")
return fs.ConfigGoto("legacy_do_auth")
case "legacy_auth_code":
authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
m.Set("auth_code", authCode)
return fs.ConfigGoto("legacy_do_auth")
case "legacy_do_auth":
@@ -303,12 +242,12 @@ machines.`)
authCode, _ := m.Get("auth_code")
srv := rest.NewClient(fshttp.NewClient(ctx))
clientID, _ := m.Get(configClientID)
if clientID == "" {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
}
clientSecret, _ := m.Get(configClientSecret)
if clientSecret == "" {
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
}
@@ -321,7 +260,7 @@ machines.`)
}
token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
if err == errAuthCodeRequired {
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
}
m.Set("password", "")
m.Set("auth_code", "")
@@ -333,6 +272,58 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia_se": // telia_se cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "tele2": // tele2 cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, tele2CloudClientID)
m.Set(configTokenURL, tele2CloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
ClientID: tele2CloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "onlime": // onlime cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
@@ -340,7 +331,7 @@ section of the official Jottacloud client. If you instead want to access the
sync or the backup section, for example, you must choose yes.`)
case "choose_device_query":
if conf.Result != "true" {
if config.Result != "true" {
m.Set(configDevice, "")
m.Set(configMountpoint, "")
return fs.ConfigGoto("end")
@@ -381,7 +372,7 @@ a new by entering a unique name.`, defaultDevice)
return deviceNames[i], ""
})
case "choose_device_result":
device := conf.Result
device := config.Result
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
@@ -441,7 +432,7 @@ You may create a new by entering a unique name.`, device)
return dev.MountPoints[i].Name, ""
})
case "choose_device_mountpoint":
mountpoint := conf.Result
mountpoint := config.Result
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
@@ -472,7 +463,7 @@ You may create a new by entering a unique name.`, device)
if isNew {
if device == defaultDevice {
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice)
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
}
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
@@ -487,7 +478,7 @@ You may create a new by entering a unique name.`, device)
// All the config flows end up here in case we need to carry on with something
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", conf.State)
return nil, fmt.Errorf("unknown state %q", config.State)
}
// Options defines the configuration for this backend
@@ -938,12 +929,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
oauthConfig.AuthURL = tokenURL
}
} else if ver == legacyConfigVersion {
clientID, _ := m.Get(configClientID)
if clientID == "" {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
}
clientSecret, _ := m.Get(configClientSecret)
if clientSecret == "" {
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
}
oauthConfig.ClientID = clientID
@@ -1009,13 +1000,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features.ListR = nil
}
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
}
f.user = cust.Username
f.setEndpoints()
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, err := f.readMetaDataForPath(ctx, "")
@@ -1025,6 +1009,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return err
})
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
}
f.user = cust.Username
f.setEndpoints()
if root != "" && !rootIsDir {
// Check to see if the root actually an existing file
remote := path.Base(root)

View File

@@ -497,6 +497,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
iofs "io/fs"
"os"
"path"
"path/filepath"
@@ -115,17 +114,6 @@ points, as you explicitly acknowledge that they should be skipped.`,
NoPrefix: true,
Advanced: true,
},
{
Name: "skip_specials",
Help: `Don't warn about skipped pipes, sockets and device objects.
This flag disables warning messages on skipped pipes, sockets and
device objects, as you explicitly acknowledge that they should be
skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
},
{
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
@@ -339,7 +327,6 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
SkipSpecials bool `config:"skip_specials"`
UTFNorm bool `config:"unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
@@ -684,12 +671,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
symlinkFlag := os.ModeSymlink
if runtime.GOOS == "windows" {
symlinkFlag |= os.ModeIrregular
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&symlinkFlag) != 0 {
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
@@ -711,13 +694,13 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&symlinkFlag) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
d := f.newDirectory(newRemote, fi)
entries = append(entries, d)
}
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&symlinkFlag != 0 {
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += fs.LinkSuffix
}
// Don't include non directory if not included
@@ -854,13 +837,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
err := os.Remove(localPath)
if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
if os.Chmod(localPath, 0o600) == nil {
err = os.Remove(localPath)
}
}
return err
return os.Remove(localPath)
}
// Precision of the file system
@@ -1070,11 +1047,12 @@ func (f *Fs) Hashes() hash.Set {
var commandHelp = []fs.CommandHelp{
{
Name: "noop",
Short: "A null operation for testing backend commands.",
Long: `This is a test command which has some options you can try to change the output.`,
Short: "A null operation for testing backend commands",
Long: `This is a test command which has some options
you can try to change the output.`,
Opts: map[string]string{
"echo": "Echo the input arguments.",
"error": "Return an error based on option value.",
"echo": "echo the input arguments",
"error": "return an error based on option value",
},
},
}
@@ -1257,9 +1235,7 @@ func (o *Object) Storable() bool {
}
return false
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
if !o.fs.opt.SkipSpecials {
fs.Logf(o, "Can't transfer non file/directory")
}
fs.Logf(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debugf(o, "Skipping directory")

View File

@@ -334,7 +334,7 @@ func TestMetadata(t *testing.T) {
func testMetadata(t *testing.T, r *fstest.Run, o *Object, when time.Time) {
ctx := context.Background()
whenRFC := when.Local().Format(time.RFC3339Nano)
whenRFC := when.Format(time.RFC3339Nano)
const dayLength = len("2001-01-01")
f := r.Flocal.(*Fs)

View File

@@ -1,40 +0,0 @@
//go:build windows
package local
import (
"context"
"path/filepath"
"runtime"
"syscall"
"testing"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
// Microsoft docs indicate that "This attribute is not honored on directories."
// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
// and https://github.com/golang/go/issues/26295
func TestRmdirWindows(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skipf("windows only")
}
r := fstest.NewRun(t)
defer r.Finalise()
err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
require.NoError(t, err)
ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
require.NoError(t, err)
err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
require.NoError(t, err)
err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
assert.NoError(t, err)
}

View File

@@ -1,4 +1,4 @@
//go:build dragonfly || plan9 || js || aix
//go:build dragonfly || plan9 || js
package local

View File

@@ -400,7 +400,7 @@ type quirks struct {
}
func (q *quirks) parseQuirks(option string) {
for flag := range strings.SplitSeq(option, ",") {
for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
@@ -1770,7 +1770,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
f.speedupAny = false
uniqueValidPatterns := make(map[string]any)
for pattern := range strings.SplitSeq(patternString, ",") {
for _, pattern := range strings.Split(patternString, ",") {
pattern = strings.ToLower(strings.TrimSpace(pattern))
if pattern == "" {
continue

View File

@@ -18,7 +18,6 @@ Improvements:
import (
"context"
"crypto/tls"
"encoding/base64"
"errors"
"fmt"
"io"
@@ -48,9 +47,6 @@ const (
maxSleep = 2 * time.Second
eventWaitTime = 500 * time.Millisecond
decayConstant = 2 // bigger for slower decay, exponential
sessionIDConfigKey = "session_id"
masterKeyConfigKey = "master_key"
)
var (
@@ -74,24 +70,6 @@ func init() {
Help: "Password.",
Required: true,
IsPassword: true,
}, {
Name: "2fa",
Help: `The 2FA code of your MEGA account if the account is set up with one`,
Required: false,
}, {
Name: sessionIDConfigKey,
Help: "Session (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: masterKeyConfigKey,
Help: "Master key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: "debug",
Help: `Output more debug from Mega.
@@ -135,9 +113,6 @@ Enabling it will increase CPU usage and add network overhead.`,
type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
TwoFA string `config:"2fa"`
SessionID string `config:"session_id"`
MasterKey string `config:"master_key"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
UseHTTPS bool `config:"use_https"`
@@ -234,19 +209,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
ci := fs.GetConfig(ctx)
// Create Fs
root = parsePath(root)
f := &Fs{
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
// cache *mega.Mega on username so we can reuse and share
// them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the
@@ -286,29 +248,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})
}
if opt.SessionID == "" {
fs.Debugf(f, "Using username and password to initialize the Mega API")
err := srv.MultiFactorLogin(opt.User, opt.Pass, opt.TwoFA)
if err != nil {
return nil, fmt.Errorf("couldn't login: %w", err)
}
megaCache[opt.User] = srv
m.Set(sessionIDConfigKey, srv.GetSessionID())
encodedMasterKey := base64.StdEncoding.EncodeToString(srv.GetMasterKey())
m.Set(masterKeyConfigKey, encodedMasterKey)
} else {
fs.Debugf(f, "Using previously stored session ID and master key to initialize the Mega API")
decodedMasterKey, err := base64.StdEncoding.DecodeString(opt.MasterKey)
if err != nil {
return nil, fmt.Errorf("couldn't decode master key: %w", err)
}
err = srv.LoginWithKeys(opt.SessionID, decodedMasterKey)
if err != nil {
fs.Debugf(f, "login with previous auth keys failed: %v", err)
}
err := srv.Login(opt.User, opt.Pass)
if err != nil {
return nil, fmt.Errorf("couldn't login: %w", err)
}
megaCache[opt.User] = srv
}
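A minimal, self-contained sketch of the master-key round-trip used above: the raw key bytes are stored base64-encoded in the config and decoded again before LoginWithKeys (the key bytes below are made up):

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

func main() {
	// Stand-in for the raw bytes returned by srv.GetMasterKey().
	masterKey := []byte{0x01, 0x02, 0x03, 0x04}

	// Stored in the config under master_key.
	encoded := base64.StdEncoding.EncodeToString(masterKey)

	// Decoded again on the next run before calling LoginWithKeys.
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded, bytes.Equal(masterKey, decoded)) // AQIDBA== true
}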
f.srv = srv
root = parsePath(root)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: srv,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
DuplicateFiles: true,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
// Find the root node and check if it is a file or not
_, err = f.findRoot(ctx, false)
@@ -988,9 +946,9 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
return nil, fmt.Errorf("failed to get Mega Quota: %w", err)
}
usage := &fs.Usage{
Total: fs.NewUsageValue(q.Mstrg), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Cstrg), // bytes in use
Free: fs.NewUsageValue(q.Mstrg - q.Cstrg), // bytes which can be uploaded before reaching the quota
Total: fs.NewUsageValue(int64(q.Mstrg)), // quota of bytes that can be used
Used: fs.NewUsageValue(int64(q.Cstrg)), // bytes in use
Free: fs.NewUsageValue(int64(q.Mstrg - q.Cstrg)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}

View File

@@ -325,12 +325,13 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
// listDir lists the bucket to the entries
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
return callback(entry)
entries = append(entries, entry)
return nil
})
return err
return entries, err
}
// listBuckets lists the buckets to entries
@@ -353,46 +354,15 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
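To make the ListP contract above concrete without depending on rclone's types, here is a minimal, self-contained sketch of the same paged-callback pattern (plain strings stand in for fs.DirEntries):

package main

import "fmt"

// listP mimics the ListP contract shown above: entries are delivered to the
// callback in tranches, and a callback error stops the listing immediately.
func listP(pages [][]string, callback func([]string) error) error {
	for _, page := range pages {
		if err := callback(page); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	pages := [][]string{{"one.txt", "two.txt"}, {"three.txt"}}
	err := listP(pages, func(entries []string) error {
		for _, e := range entries {
			fmt.Println(e)
		}
		return nil
	})
	if err != nil {
		fmt.Println("listing failed:", err)
	}
}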
// ListR lists the objects and directories of the Fs starting
@@ -659,7 +629,6 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -87,7 +87,7 @@ Please choose the 'y' option to set your own password then enter your secret.`,
var commandHelp = []fs.CommandHelp{{
Name: "du",
Short: "Return disk usage information for a specified directory.",
Short: "Return disk usage information for a specified directory",
Long: `The usage information returned includes the targeted directory as well as all
files stored in any sub-directories that may exist.`,
}, {
@@ -96,12 +96,7 @@ files stored in any sub-directories that may exist.`,
Long: `The desired path location (including applicable sub-directories) ending in
the object that will be the target of the symlink (for example, /links/mylink).
Include the file extension for the object, if applicable.
Usage example:
` + "```console" + `
rclone backend symlink <src> <path>
` + "```",
` + "`rclone backend symlink <src> <path>`",
},
}

View File

@@ -243,6 +243,7 @@ func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
numSet = 0
for k, v := range metadata {
k, v := k, v
switch k {
case "mtime":
t, err := time.Parse(timeFormatIn, v)
@@ -421,7 +422,12 @@ func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
return true
}
return slices.ContainsFunc(p.GetGrantedToIdentities(m.fs.driveType), hasUserIdentity)
for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
if hasUserIdentity(identity) {
return true
}
}
return false
}
// Put Permissions with a user first, leaving unsorted otherwise
slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {

View File

@@ -1377,27 +1377,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (en
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
return nil, err
}
err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
entry, err := f.itemToDirEntry(ctx, dir, info)
@@ -1407,16 +1389,13 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
if entry == nil {
return nil
}
err = list.Add(entry)
if err != nil {
return err
}
entries = append(entries, entry)
return nil
})
if err != nil {
return err
return nil, err
}
return list.Flush()
return entries, nil
}
// ListR lists the objects and directories of the Fs starting
@@ -3044,7 +3023,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}

View File

@@ -172,8 +172,8 @@ func BenchmarkQuickXorHash(b *testing.B) {
require.NoError(b, err)
require.Equal(b, len(buf), n)
h := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(buf)
h.Sum(nil)

View File

@@ -30,25 +30,20 @@ const (
var commandHelp = []fs.CommandHelp{{
Name: operationRename,
Short: "change the name of an object.",
Short: "change the name of an object",
Long: `This command can be used to rename an object.
Usage example:
Usage Examples:
` + "```console" + `
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
` + "```",
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
`,
Opts: nil,
}, {
Name: operationListMultiPart,
Short: "List the unfinished multipart uploads.",
Short: "List the unfinished multipart uploads",
Long: `This command lists the unfinished multipart uploads in JSON format.
Usage example:
` + "```console" + `
rclone backend list-multipart-uploads oos:bucket/path/to/object
` + "```" + `
rclone backend list-multipart-uploads oos:bucket/path/to/object
It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.
@@ -56,82 +51,70 @@ multipart uploads.
You can call it with no bucket in which case it lists all buckets, with
a bucket or with a bucket and path.
` + "```json" + `
{
"test-bucket": [
{
"namespace": "test-namespace",
"bucket": "test-bucket",
"object": "600m.bin",
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
"timeCreated": "2022-07-29T06:21:16.595Z",
"storageTier": "Standard"
}
]
}`,
{
"test-bucket": [
{
"namespace": "test-namespace",
"bucket": "test-bucket",
"object": "600m.bin",
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
"timeCreated": "2022-07-29T06:21:16.595Z",
"storageTier": "Standard"
}
]
`,
}, {
Name: operationCleanup,
Short: "Remove unfinished multipart uploads.",
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see
what it would do.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Usage examples:
rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
` + "```console" + `
rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
` + "```" + `
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete.",
"max-age": "Max age of upload to delete",
},
}, {
Name: operationRestore,
Short: "Restore objects from Archive to Standard storage.",
Long: `This command can be used to restore one or more objects from Archive to
Standard storage.
Short: "Restore objects from Archive to Standard storage",
Long: `This command can be used to restore one or more objects from Archive to Standard storage.
Usage examples:
Usage Examples:
` + "```console" + `
rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
rclone backend restore oos:bucket -o hours=HOURS
` + "```" + `
rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
rclone backend restore oos:bucket -o hours=HOURS
This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
` + "```console" + `
rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
` + "```" + `
rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
All the objects shown will be marked for restore, then:
All the objects shown will be marked for restore, then
` + "```console" + `
rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
` + "```" + `
rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
It returns a list of status dictionaries with Object Name and Status keys.
The Status will be "RESTORED" if it was successful or an error message if not.
It returns a list of status dictionaries with Object Name and Status
keys. The Status will be "RESTORED" if it was successful or an error message
if not.
` + "```json" + `
[
{
"Object": "test.txt"
"Status": "RESTORED",
},
{
"Object": "test/file4.txt"
"Status": "RESTORED",
}
]
` + "```",
[
{
"Object": "test.txt"
"Status": "RESTORED",
},
{
"Object": "test/file4.txt"
"Status": "RESTORED",
}
]
`,
Opts: map[string]string{
"hours": `The number of hours for which this object will be restored.
Default is 24 hrs.`,
"hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
},
},
}

View File

@@ -12,7 +12,6 @@ import (
"strings"
"time"
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
@@ -34,46 +33,9 @@ func init() {
NewFs: NewFs,
CommandHelp: commandHelp,
Options: newOptions(),
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored as opc-meta- keys.`,
},
})
}
var systemMetadataInfo = map[string]fs.MetadataHelp{
"opc-meta-mode": {
Help: "File type and mode",
Type: "octal, unix style",
Example: "0100664",
},
"opc-meta-uid": {
Help: "User ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-gid": {
Help: "Group ID of owner",
Type: "decimal number",
Example: "500",
},
"opc-meta-atime": {
Help: "Time of last access",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-mtime": {
Help: "Time of last modification",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
"opc-meta-btime": {
Help: "Time of file birth (creation)",
Type: "ISO 8601",
Example: "2025-06-30T22:27:43-04:00",
},
}
// Fs represents a remote object storage server
type Fs struct {
name string // name of this remote
@@ -120,7 +82,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMetadata: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
@@ -254,47 +215,15 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucketName, directory := f.split(dir)
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
if bucketName == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
}
// listFn is called from list to handle an object.
@@ -443,24 +372,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectst
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
}
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return nil
return entries, nil
}
// listBuckets returns all the buckets to out
@@ -759,45 +688,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return list.Flush()
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx)
if err != nil {
return nil, err
}
metadata = make(fs.Metadata, len(o.meta)+7)
for k, v := range o.meta {
switch k {
case metaMtime:
if modTime, err := swift.FloatStringToTime(v); err == nil {
metadata["mtime"] = modTime.Format(time.RFC3339Nano)
}
case metaMD5Hash:
// don't write hash metadata
default:
metadata[k] = v
}
}
if o.mimeType != "" {
metadata["content-type"] = o.mimeType
}
if !o.lastModified.IsZero() {
metadata["btime"] = o.lastModified.Format(time.RFC3339Nano)
}
return metadata, nil
}
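As a rough, standard-library-only sketch of the conversion swift.FloatStringToTime performs on the mtime value above (the exact rclone helper is not reproduced here, and the sample value is made up):

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// A float seconds-since-epoch string, as used for the mtime metadata above.
	v := "1498112651.03"
	f, err := strconv.ParseFloat(v, 64)
	if err != nil {
		panic(err)
	}
	sec := int64(f)
	nsec := int64((f - float64(sec)) * 1e9)
	fmt.Println(time.Unix(sec, nsec).UTC().Format(time.RFC3339Nano))
}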
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}

View File

@@ -629,31 +629,11 @@ func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callbac
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
return list.Add(o)
entries = append(entries, o)
return nil
})
if err != nil {
return err
}
return list.Flush()
return entries, err
}
// ListR lists the objects and directories of the Fs starting
@@ -1397,8 +1377,6 @@ var (
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)

View File

@@ -5,7 +5,6 @@ package api
import (
"fmt"
"net/url"
"reflect"
"strconv"
"time"
@@ -137,25 +136,8 @@ type Link struct {
}
// Valid reports whether l is non-nil, has a URL, and is not expired.
// It primarily checks the URL's expire query parameter, falling back to the Expire field.
func (l *Link) Valid() bool {
if l == nil || l.URL == "" {
return false
}
// Primary validation: check URL's expire query parameter
if u, err := url.Parse(l.URL); err == nil {
if expireStr := u.Query().Get("expire"); expireStr != "" {
// Try parsing as Unix timestamp (seconds)
if expireInt, err := strconv.ParseInt(expireStr, 10, 64); err == nil {
expireTime := time.Unix(expireInt, 0)
return time.Now().Add(10 * time.Second).Before(expireTime)
}
}
}
// Fallback validation: use the Expire field if URL parsing didn't work
return time.Now().Add(10 * time.Second).Before(time.Time(l.Expire))
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
}
// URL is a basic form of URL

View File

@@ -1,99 +0,0 @@
package api
import (
"fmt"
"testing"
"time"
)
// TestLinkValid tests the Link.Valid method for various scenarios
func TestLinkValid(t *testing.T) {
tests := []struct {
name string
link *Link
expected bool
desc string
}{
{
name: "nil link",
link: nil,
expected: false,
desc: "nil link should be invalid",
},
{
name: "empty URL",
link: &Link{URL: ""},
expected: false,
desc: "empty URL should be invalid",
},
{
name: "valid URL with future expire parameter",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
},
expected: true,
desc: "URL with future expire parameter should be valid",
},
{
name: "expired URL with past expire parameter",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
},
expected: false,
desc: "URL with past expire parameter should be invalid",
},
{
name: "URL expire parameter takes precedence over Expire field",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
Expire: Time(time.Now().Add(-time.Hour)), // Fallback is expired
},
expected: true,
desc: "URL expire parameter should take precedence over Expire field",
},
{
name: "URL expire parameter within 10 second buffer should be invalid",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(5*time.Second).Unix()),
},
expected: false,
desc: "URL expire parameter within 10 second buffer should be invalid",
},
{
name: "fallback to Expire field when no URL expire parameter",
link: &Link{
URL: "https://example.com/file",
Expire: Time(time.Now().Add(time.Hour)),
},
expected: true,
desc: "should fallback to Expire field when URL has no expire parameter",
},
{
name: "fallback to Expire field when URL expire parameter is invalid",
link: &Link{
URL: "https://example.com/file?expire=invalid",
Expire: Time(time.Now().Add(time.Hour)),
},
expected: true,
desc: "should fallback to Expire field when URL expire parameter is unparsable",
},
{
name: "invalid when both URL expire and Expire field are expired",
link: &Link{
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
Expire: Time(time.Now().Add(-time.Hour)),
},
expected: false,
desc: "should be invalid when both URL expire and Expire field are expired",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := tt.link.Valid()
if result != tt.expected {
t.Errorf("Link.Valid() = %v, expected %v. %s", result, tt.expected, tt.desc)
}
})
}
}

View File

@@ -979,24 +979,6 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
return nil
}
// untrash a file or directory by ID
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the restored item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) untrashObjects(ctx context.Context, IDs []string) (err error) {
if len(IDs) == 0 {
return nil
}
req := api.RequestBatch{
IDs: IDs,
}
if err := f.requestBatchAction(ctx, "batchUntrash", &req); err != nil {
return fmt.Errorf("untrash object failed: %w", err)
}
return nil
}
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
@@ -1081,14 +1063,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return f.waitTask(ctx, info.TaskID)
}
// Move the object to a new parent folder
//
// Objects cannot be moved to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the moved item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
// Move the object
func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err error) {
if len(IDs) == 0 {
return nil
@@ -1104,12 +1079,6 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
}
// renames the object
//
// The new name must be different from the current name.
// "file_rename_to_same_name" (3): Name of file or folder is not changed
//
// Within the same folder, object names must be unique.
// "file_duplicated_name" (3): File name cannot be repeated
func (f *Fs) renameObject(ctx context.Context, ID, newName string) (info *api.File, err error) {
req := api.File{
Name: f.opt.Enc.FromStandardName(newName),
@@ -1194,13 +1163,18 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}
@@ -1211,74 +1185,31 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
return nil, err
}
if srcObj.parent != dstParentID {
// Perform the move. A numbered copy might be generated upon name collision.
if srcParentID != dstParentID {
// Do the move
if err = f.moveObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
return nil, fmt.Errorf("move: failed to move object %s to new parent %s: %w", srcObj.id, dstParentID, err)
return nil, err
}
defer func() {
if err != nil {
// FIXME: Restored file might have a numbered name if a conflict occurs
if mvErr := f.moveObjects(ctx, []string{srcObj.id}, srcObj.parent); mvErr != nil {
fs.Logf(f, "move: couldn't restore original object %q to %q after move failure: %v", dstObj.id, src.Remote(), mvErr)
}
}
}()
}
// Manually update info of moved object to save API calls
dstObj.id = srcObj.id
dstObj.mimeType = srcObj.mimeType
dstObj.gcid = srcObj.gcid
dstObj.md5sum = srcObj.md5sum
dstObj.hasMetaData = true
// Find the moved object and any conflict object with the same name.
var moved, conflict *api.File
_, err = f.listAll(ctx, dstParentID, api.KindOfFile, "false", func(item *api.File) bool {
if item.ID == srcObj.id {
moved = item
if item.Name == dstLeaf {
return true
}
} else if item.Name == dstLeaf {
conflict = item
if srcLeaf != dstLeaf {
// Rename
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("move: couldn't rename moved file: %w", err)
}
// Stop early if both found
return moved != nil && conflict != nil
})
if err != nil {
return nil, fmt.Errorf("move: couldn't locate moved file %q in destination directory %q: %w", srcObj.id, dstParentID, err)
return dstObj, dstObj.setMetaData(info)
}
if moved == nil {
return nil, fmt.Errorf("move: moved file %q not found in destination", srcObj.id)
}
// If moved object already has the correct name, return
if moved.Name == dstLeaf {
return dstObj, dstObj.setMetaData(moved)
}
// If name collision, delete conflicting file first
if conflict != nil {
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
return nil, fmt.Errorf("move: couldn't delete conflicting file: %w", err)
}
defer func() {
if err != nil {
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
fs.Logf(f, "move: couldn't restore conflicting file: %v", restoreErr)
}
}
}()
}
info, err := f.renameObject(ctx, srcObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("move: couldn't rename moved file %q to %q: %w", dstObj.id, dstLeaf, err)
}
return dstObj, dstObj.setMetaData(info)
return dstObj, nil
}
// copy objects
//
// Objects cannot be copied to their current folder.
// "file_move_or_copy_to_cur" (9): Please don't move or copy to current folder or sub folder
//
// If a name collision occurs in the destination folder, PikPak might automatically
// rename the copied item(s) by appending a numbered suffix. For example,
// foo.txt -> foo(1).txt or foo(2).txt if foo(1).txt already exists
func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err error) {
if len(IDs) == 0 {
return nil
@@ -1302,13 +1233,13 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Object, err error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
err = srcObj.readMetaData(ctx)
err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
@@ -1323,55 +1254,31 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
fs.Debugf(src, "Can't copy - same parent")
return nil, fs.ErrorCantCopy
}
// Check for possible conflicts: Pikpak creates numbered copies on name collision.
var conflict *api.File
_, srcLeaf := dircache.SplitPath(srcObj.remote)
if srcLeaf == dstLeaf {
if conflict, err = f.readMetaDataForPath(ctx, remote); err == nil {
// delete conflicting file
if err = f.deleteObjects(ctx, []string{conflict.ID}, true); err != nil {
return nil, fmt.Errorf("copy: couldn't delete conflicting file: %w", err)
}
defer func() {
if err != nil {
if restoreErr := f.untrashObjects(ctx, []string{conflict.ID}); restoreErr != nil {
fs.Logf(f, "copy: couldn't restore conflicting file: %v", restoreErr)
}
}
}()
} else if err != fs.ErrorObjectNotFound {
return nil, err
}
} else {
dstDir, _ := dircache.SplitPath(remote)
dstObj.remote = path.Join(dstDir, srcLeaf)
if conflict, err = f.readMetaDataForPath(ctx, dstObj.remote); err == nil {
tmpName := conflict.Name + "-rclone-copy-" + random.String(8)
if _, err = f.renameObject(ctx, conflict.ID, tmpName); err != nil {
return nil, fmt.Errorf("copy: couldn't rename conflicting file: %w", err)
}
defer func() {
if _, renameErr := f.renameObject(ctx, conflict.ID, conflict.Name); renameErr != nil {
fs.Logf(f, "copy: couldn't rename conflicting file back to original: %v", renameErr)
}
}()
} else if err != fs.ErrorObjectNotFound {
return nil, err
}
}
// Copy the object
if err := f.copyObjects(ctx, []string{srcObj.id}, dstParentID); err != nil {
return nil, fmt.Errorf("couldn't copy file: %w", err)
}
err = dstObj.readMetaData(ctx)
if err != nil {
// Update info of the copied object with new parent but source name
if info, err := dstObj.fs.readMetaDataForPath(ctx, srcObj.remote); err != nil {
return nil, fmt.Errorf("copy: couldn't locate copied file: %w", err)
} else if err = dstObj.setMetaData(info); err != nil {
return nil, err
}
// Can't copy and change name in one step so we have to check if we have
// the correct name after copy
srcLeaf, _, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}
if srcLeaf != dstLeaf {
return f.Move(ctx, dstObj, remote)
// Rename
info, err := f.renameObject(ctx, dstObj.id, dstLeaf)
if err != nil {
return nil, fmt.Errorf("copy: couldn't rename copied file: %w", err)
}
return dstObj, dstObj.setMetaData(info)
}
return dstObj, nil
}
@@ -1508,30 +1415,8 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
}
if new.File == nil {
return nil, fmt.Errorf("invalid response: %+v", new)
}
defer atexit.OnError(&err, func() {
fs.Debugf(leaf, "canceling upload: %v", err)
if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
if new.Task != nil {
if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
time.Sleep(taskWaitTime)
}
})()
// Note: The API might automatically append a numbered suffix to the filename,
// even if a file with the same name does not exist in the target directory.
if upName := f.opt.Enc.ToStandardName(new.File.Name); leaf != upName {
return nil, fserrors.NoRetryError(fmt.Errorf("uploaded file name mismatch: expected %q, got %q", leaf, upName))
}
// early return; in case of zero-byte objects or uploaded by matched gcid
if new.File.Phase == api.PhaseTypeComplete {
} else if new.File.Phase == api.PhaseTypeComplete {
// early return; in case of zero-byte objects
if acc, ok := in.(*accounting.Account); ok && acc != nil {
// if `in io.Reader` is still in type of `*accounting.Account` (meaning that it is unused)
// it is considered as a server side copy as no incoming/outgoing traffic occur at all
@@ -1541,6 +1426,18 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
return new.File, nil
}
defer atexit.OnError(&err, func() {
fs.Debugf(leaf, "canceling upload: %v", err)
if cancelErr := f.deleteObjects(ctx, []string{new.File.ID}, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
if cancelErr := f.deleteTask(ctx, new.Task.ID, false); cancelErr != nil {
fs.Logf(leaf, "failed to cancel upload: %v", cancelErr)
}
fs.Debugf(leaf, "waiting %v for the cancellation to be effective", taskWaitTime)
time.Sleep(taskWaitTime)
})()
if uploadType == api.UploadTypeForm && new.Form != nil {
err = f.uploadByForm(ctx, in, req.Name, size, new.Form, options...)
} else if uploadType == api.UploadTypeResumable && new.Resumable != nil {
@@ -1552,9 +1449,6 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, gcid string,
if err != nil {
return nil, fmt.Errorf("failed to upload: %w", err)
}
if new.Task == nil {
return new.File, nil
}
return new.File, f.waitTask(ctx, new.Task.ID)
}
@@ -1678,43 +1572,39 @@ func (f *Fs) decompressDir(ctx context.Context, filename, id, password string, s
var commandHelp = []fs.CommandHelp{{
Name: "addurl",
Short: "Add offline download task for url.",
Short: "Add offline download task for url",
Long: `This command adds offline download task for url.
Usage example:
Usage:
` + "```console" + `
rclone backend addurl pikpak:dirpath url
` + "```" + `
rclone backend addurl pikpak:dirpath url
Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
download will fallback to default 'My Pack' folder.`,
Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
download will fallback to default 'My Pack' folder.
`,
}, {
Name: "decompress",
Short: "Request decompress of a file/files in a folder.",
Short: "Request decompress of a file/files in a folder",
Long: `This command requests decompress of file/files in a folder.
Usage examples:
Usage:
` + "```console" + `
rclone backend decompress pikpak:dirpath {filename} -o password=password
rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
` + "```" + `
rclone backend decompress pikpak:dirpath {filename} -o password=password
rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
An optional argument 'filename' can be specified for a file located in
'pikpak:dirpath'. You may want to pass '-o password=password' for a
password-protected files. Also, pass '-o delete-src-file' to delete
An optional argument 'filename' can be specified for a file located in
'pikpak:dirpath'. You may want to pass '-o password=password' for a
password-protected files. Also, pass '-o delete-src-file' to delete
source files after decompression finished.
Result:
` + "```json" + `
{
"Decompressed": 17,
"SourceDeleted": 0,
"Errors": 0
}
` + "```",
{
"Decompressed": 17,
"SourceDeleted": 0,
"Errors": 0
}
`,
}}
// Command the backend to run a named command

View File

@@ -793,7 +793,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, err
}
usage = &fs.Usage{
Used: fs.NewUsageValue(info.SpaceUsed),
Used: fs.NewUsageValue(int64(info.SpaceUsed)),
}
return usage, nil
}

View File

@@ -13,8 +13,6 @@ import (
protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
"github.com/henrybear327/go-proton-api"
"github.com/pquerna/otp/totp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -89,17 +87,6 @@ The value can also be provided with --protondrive-2fa=000000
The 2FA code of your proton drive account if the account is set up with
two-factor authentication`,
Required: false,
}, {
Name: "otp_secret_key",
Help: `The OTP secret key
The value can also be provided with --protondrive-otp-secret-key=ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
The OTP secret key of your proton drive account if the account is set up with
two-factor authentication`,
Required: false,
Sensitive: true,
IsPassword: true,
}, {
Name: clientUIDKey,
Help: "Client uid key (internal use only)",
@@ -204,7 +191,6 @@ type Options struct {
Password string `config:"password"`
MailboxPassword string `config:"mailbox_password"`
TwoFA string `config:"2fa"`
OtpSecretKey string `config:"otp_secret_key"`
// advanced
Enc encoder.MultiEncoder `config:"encoding"`
@@ -370,15 +356,7 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
config.FirstLoginCredential.Username = opt.Username
config.FirstLoginCredential.Password = opt.Password
config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
// if 2FA code is provided, use it; otherwise, generate one using the OTP secret key if provided
config.FirstLoginCredential.TwoFA = opt.TwoFA
if opt.TwoFA == "" && opt.OtpSecretKey != "" {
code, err := totp.GenerateCode(opt.OtpSecretKey, time.Now())
if err != nil {
return nil, fmt.Errorf("couldn't generate 2FA code: %w", err)
}
config.FirstLoginCredential.TwoFA = code
}
protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
if err != nil {
return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err)
@@ -417,14 +395,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
if opt.OtpSecretKey != "" {
var err error
opt.OtpSecretKey, err = obscure.Reveal(opt.OtpSecretKey)
if err != nil {
return nil, fmt.Errorf("couldn't decrypt OtpSecretKey: %w", err)
}
}
ci := fs.GetConfig(ctx)
root = strings.Trim(root, "/")

View File

@@ -59,7 +59,11 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := min(neededMemory, max(int64(speed*u.effectiveTime.Seconds()), u.reserved))
effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize

View File

@@ -1,217 +0,0 @@
# Adding a new s3 provider
It is quite easy to add a new S3 provider to rclone.
The process is as follows: create yaml -> add docs -> run tests ->
adjust yaml until tests pass.
You'll need to add the tags listed below (optional tags are in [] and
do not get displayed in rclone config if empty).
All tags can be found in the Provider struct in `backend/s3/providers.go`.
Looking through a few of the yaml files as examples should make things
clear; `AWS.yaml` has the most config.
## YAML
In `backend/s3/provider/YourProvider.yaml`
- name
- description
- More like the full name, often "YourProvider + Object Storage"
- [Region]
- Any regions your provider supports or the defaults (use `region: {}` for this)
- Example from AWS.yaml:
```yaml
region:
us-east-1: |-
The default endpoint - a good choice if you are unsure.
US Region, Northern Virginia, or Pacific Northwest.
Leave location constraint empty.
```
- The defaults (as seen in Rclone.yaml):
```yaml
region:
"": |-
Use this if unsure.
Will use v4 signatures and an empty region.
other-v2-signature: |-
Use this only if v4 signatures don't work.
E.g. pre Jewel/v10 CEPH.
```
- [Endpoint]
- Any endpoints your provider supports
- Example from Mega.yaml
```yaml
endpoint:
s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
```
- [Location Constraint]
- The Location Constraint of your remote, often the same as the region.
- Example from AWS.yaml
```yaml
location_constraint:
"": Empty for US Region, Northern Virginia, or Pacific Northwest
us-east-2: US East (Ohio) Region
```
- [ACL]
- Identical across *most* providers. Select the default with `acl: {}`
- Example from AWS.yaml
```yaml
acl:
private: |-
Owner gets FULL_CONTROL.
No one else has access rights (default).
public-read: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ access.
public-read-write: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ and WRITE access.
Granting this on a bucket is generally not recommended.
authenticated-read: |-
Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.
bucket-owner-read: |-
Object owner gets FULL_CONTROL.
Bucket owner gets READ access.
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
bucket-owner-full-control: |-
Both the object owner and the bucket owner get FULL_CONTROL over the object.
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
```
- [Storage Class]
- Identical across *most* providers.
- Defaults from AWS.yaml
```yaml
storage_class:
"": Default
STANDARD: Standard storage class
REDUCED_REDUNDANCY: Reduced redundancy storage class
STANDARD_IA: Standard Infrequent Access storage class
ONEZONE_IA: One Zone Infrequent Access storage class
GLACIER: Glacier Flexible Retrieval storage class
DEEP_ARCHIVE: Glacier Deep Archive storage class
INTELLIGENT_TIERING: Intelligent-Tiering storage class
GLACIER_IR: Glacier Instant Retrieval storage class
```
- [Server Side Encryption]
- Not common, identical across *most* providers.
- Defaults from AWS.yaml
```yaml
server_side_encryption:
"": None
AES256: AES256
aws:kms: aws:kms
```
- [Advanced Options]
- All advanced options are boolean - if true, the configurator asks about that
value; if not, it doesn't:
```go
BucketACL bool `yaml:"bucket_acl,omitempty"`
DirectoryBucket bool `yaml:"directory_bucket,omitempty"`
LeavePartsOnError bool `yaml:"leave_parts_on_error,omitempty"`
RequesterPays bool `yaml:"requester_pays,omitempty"`
SSECustomerAlgorithm bool `yaml:"sse_customer_algorithm,omitempty"`
SSECustomerKey bool `yaml:"sse_customer_key,omitempty"`
SSECustomerKeyBase64 bool `yaml:"sse_customer_key_base64,omitempty"`
SSECustomerKeyMd5 bool `yaml:"sse_customer_key_md5,omitempty"`
SSEKmsKeyID bool `yaml:"sse_kms_key_id,omitempty"`
STSEndpoint bool `yaml:"sts_endpoint,omitempty"`
UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`
```
- Example from AWS.yaml:
```yaml
bucket_acl: true
directory_bucket: true
leave_parts_on_error: true
requester_pays: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
sts_endpoint: true
use_accelerate_endpoint: true
```
- Quirks
- Quirks are discovered through documentation and running the tests as seen below.
- Most quirks are `*bool` so as to have 3 values: `true`, `false` and `don't care`.
```go
type Quirks struct {
ListVersion *int `yaml:"list_version,omitempty"` // 1 or 2
ForcePathStyle *bool `yaml:"force_path_style,omitempty"` // true = path-style
ListURLEncode *bool `yaml:"list_url_encode,omitempty"`
UseMultipartEtag *bool `yaml:"use_multipart_etag,omitempty"`
UseAlreadyExists *bool `yaml:"use_already_exists,omitempty"`
UseAcceptEncodingGzip *bool `yaml:"use_accept_encoding_gzip,omitempty"`
MightGzip *bool `yaml:"might_gzip,omitempty"`
UseMultipartUploads *bool `yaml:"use_multipart_uploads,omitempty"`
UseUnsignedPayload *bool `yaml:"use_unsigned_payload,omitempty"`
UseXID *bool `yaml:"use_x_id,omitempty"`
SignAcceptEncoding *bool `yaml:"sign_accept_encoding,omitempty"`
CopyCutoff *int64 `yaml:"copy_cutoff,omitempty"`
MaxUploadParts *int `yaml:"max_upload_parts,omitempty"`
MinChunkSize *int64 `yaml:"min_chunk_size,omitempty"`
}
```
- Example from AWS.yaml
```yaml
quirks:
might_gzip: false # Never auto gzips objects
use_unsigned_payload: false # AWS has trailer support
```
Note that if you omit a section, e.g. `region`, then the user won't be
asked that question, and if you add an empty section, e.g. `region: {}`,
then the defaults from `Other.yaml` will be used.
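For orientation, a complete minimal file for a hypothetical provider
might look like the sketch below (the name, description, endpoint and
quirks are invented for illustration - pick values that match your
provider's documentation and test results):
```yaml
# backend/s3/provider/ExampleProvider.yaml (hypothetical)
name: ExampleProvider
description: Example Cloud Object Storage
# region omitted entirely: the user is never asked for a region
endpoint:
  s3.example.com: Example Cloud default endpoint
acl: {}           # empty section: the defaults from Other.yaml are offered
bucket_acl: true
quirks:
  force_path_style: true
  use_already_exists: false
```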
## DOCS
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Make sure this is in alphabetical order in the `Providers` section.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- Rule of thumb: don't edit anything not mentioned above.
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- This will make autogenerated changes!
- `README.md` - this is the home page in github
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/content/s3.md`
- Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
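A quick way to do that (a sketch - assuming the docs toolchain is
installed; `make serve` runs a local hugo server, which by default
listens on port 1313):
```console
make serve
# then open http://localhost:1313/ and spot-check the new provider page and links
```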
## TESTS
Once you've written the code, check that `rclone config` works to your
satisfaction and looks correct, and check that the integration tests pass
with `go test -v -remote NewS3Provider:`. You may need to adjust the quirks
to get them to pass. Some providers just can't pass the tests with
control characters in the names, so if these fail and the provider
doesn't support `urlEncodeListings` in the quirks then ignore them.
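As a rough sketch of running those tests (assuming you have added a
remote called `NewS3Provider` with `rclone config` that points at your
provider):
```console
cd backend/s3
go test -v -remote NewS3Provider:
```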

View File

@@ -1,140 +0,0 @@
name: AWS
description: Amazon Web Services (AWS) S3
region:
us-east-1: |-
The default endpoint - a good choice if you are unsure.
US Region, Northern Virginia, or Pacific Northwest.
Leave location constraint empty.
us-east-2: |-
US East (Ohio) Region.
Needs location constraint us-east-2.
us-west-1: |-
US West (Northern California) Region.
Needs location constraint us-west-1.
us-west-2: |-
US West (Oregon) Region.
Needs location constraint us-west-2.
ca-central-1: |-
Canada (Central) Region.
Needs location constraint ca-central-1.
eu-west-1: |-
EU (Ireland) Region.
Needs location constraint EU or eu-west-1.
eu-west-2: |-
EU (London) Region.
Needs location constraint eu-west-2.
eu-west-3: |-
EU (Paris) Region.
Needs location constraint eu-west-3.
eu-north-1: |-
EU (Stockholm) Region.
Needs location constraint eu-north-1.
eu-south-1: |-
EU (Milan) Region.
Needs location constraint eu-south-1.
eu-central-1: |-
EU (Frankfurt) Region.
Needs location constraint eu-central-1.
ap-southeast-1: |-
Asia Pacific (Singapore) Region.
Needs location constraint ap-southeast-1.
ap-southeast-2: |-
Asia Pacific (Sydney) Region.
Needs location constraint ap-southeast-2.
ap-northeast-1: |-
Asia Pacific (Tokyo) Region.
Needs location constraint ap-northeast-1.
ap-northeast-2: |-
Asia Pacific (Seoul).
Needs location constraint ap-northeast-2.
ap-northeast-3: |-
Asia Pacific (Osaka-Local).
Needs location constraint ap-northeast-3.
ap-south-1: |-
Asia Pacific (Mumbai).
Needs location constraint ap-south-1.
ap-east-1: |-
Asia Pacific (Hong Kong) Region.
Needs location constraint ap-east-1.
sa-east-1: |-
South America (Sao Paulo) Region.
Needs location constraint sa-east-1.
il-central-1: |-
Israel (Tel Aviv) Region.
Needs location constraint il-central-1.
me-south-1: |-
Middle East (Bahrain) Region.
Needs location constraint me-south-1.
af-south-1: |-
Africa (Cape Town) Region.
Needs location constraint af-south-1.
cn-north-1: |-
China (Beijing) Region.
Needs location constraint cn-north-1.
cn-northwest-1: |-
China (Ningxia) Region.
Needs location constraint cn-northwest-1.
us-gov-east-1: |-
AWS GovCloud (US-East) Region.
Needs location constraint us-gov-east-1.
us-gov-west-1: |-
AWS GovCloud (US) Region.
Needs location constraint us-gov-west-1.
endpoint: {}
location_constraint:
'': Empty for US Region, Northern Virginia, or Pacific Northwest
us-east-2: US East (Ohio) Region
us-west-1: US West (Northern California) Region
us-west-2: US West (Oregon) Region
ca-central-1: Canada (Central) Region
eu-west-1: EU (Ireland) Region
eu-west-2: EU (London) Region
eu-west-3: EU (Paris) Region
eu-north-1: EU (Stockholm) Region
eu-south-1: EU (Milan) Region
EU: EU Region
ap-southeast-1: Asia Pacific (Singapore) Region
ap-southeast-2: Asia Pacific (Sydney) Region
ap-northeast-1: Asia Pacific (Tokyo) Region
ap-northeast-2: Asia Pacific (Seoul) Region
ap-northeast-3: Asia Pacific (Osaka-Local) Region
ap-south-1: Asia Pacific (Mumbai) Region
ap-east-1: Asia Pacific (Hong Kong) Region
sa-east-1: South America (Sao Paulo) Region
il-central-1: Israel (Tel Aviv) Region
me-south-1: Middle East (Bahrain) Region
af-south-1: Africa (Cape Town) Region
cn-north-1: China (Beijing) Region
cn-northwest-1: China (Ningxia) Region
us-gov-east-1: AWS GovCloud (US-East) Region
us-gov-west-1: AWS GovCloud (US) Region
acl: {}
storage_class:
'': Default
STANDARD: Standard storage class
REDUCED_REDUNDANCY: Reduced redundancy storage class
STANDARD_IA: Standard Infrequent Access storage class
ONEZONE_IA: One Zone Infrequent Access storage class
GLACIER: Glacier Flexible Retrieval storage class
DEEP_ARCHIVE: Glacier Deep Archive storage class
INTELLIGENT_TIERING: Intelligent-Tiering storage class
GLACIER_IR: Glacier Instant Retrieval storage class
server_side_encryption:
'': None
AES256: AES256
aws:kms: aws:kms
bucket_acl: true
directory_bucket: true
leave_parts_on_error: true
requester_pays: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
sts_endpoint: true
use_accelerate_endpoint: true
quirks:
might_gzip: false # Never auto gzips objects
use_unsigned_payload: false # AWS has trailer support which means it adds checksums in the trailer without seeking
use_data_integrity_protections: true

View File

@@ -1,37 +0,0 @@
name: Alibaba
description: Alibaba Cloud Object Storage System (OSS) formerly Aliyun
endpoint:
oss-accelerate.aliyuncs.com: Global Accelerate
oss-accelerate-overseas.aliyuncs.com: Global Accelerate (outside mainland China)
oss-cn-hangzhou.aliyuncs.com: East China 1 (Hangzhou)
oss-cn-shanghai.aliyuncs.com: East China 2 (Shanghai)
oss-cn-qingdao.aliyuncs.com: North China 1 (Qingdao)
oss-cn-beijing.aliyuncs.com: North China 2 (Beijing)
oss-cn-zhangjiakou.aliyuncs.com: North China 3 (Zhangjiakou)
oss-cn-huhehaote.aliyuncs.com: North China 5 (Hohhot)
oss-cn-wulanchabu.aliyuncs.com: North China 6 (Ulanqab)
oss-cn-shenzhen.aliyuncs.com: South China 1 (Shenzhen)
oss-cn-heyuan.aliyuncs.com: South China 2 (Heyuan)
oss-cn-guangzhou.aliyuncs.com: South China 3 (Guangzhou)
oss-cn-chengdu.aliyuncs.com: West China 1 (Chengdu)
oss-cn-hongkong.aliyuncs.com: Hong Kong (Hong Kong)
oss-us-west-1.aliyuncs.com: US West 1 (Silicon Valley)
oss-us-east-1.aliyuncs.com: US East 1 (Virginia)
oss-ap-southeast-1.aliyuncs.com: Southeast Asia Southeast 1 (Singapore)
oss-ap-southeast-2.aliyuncs.com: Asia Pacific Southeast 2 (Sydney)
oss-ap-southeast-3.aliyuncs.com: Southeast Asia Southeast 3 (Kuala Lumpur)
oss-ap-southeast-5.aliyuncs.com: Asia Pacific Southeast 5 (Jakarta)
oss-ap-northeast-1.aliyuncs.com: Asia Pacific Northeast 1 (Japan)
oss-ap-south-1.aliyuncs.com: Asia Pacific South 1 (Mumbai)
oss-eu-central-1.aliyuncs.com: Central Europe 1 (Frankfurt)
oss-eu-west-1.aliyuncs.com: West Europe (London)
oss-me-east-1.aliyuncs.com: Middle East 1 (Dubai)
acl: {}
storage_class:
'': Default
STANDARD: Standard storage class
GLACIER: Archive storage mode
STANDARD_IA: Infrequent access storage mode
bucket_acl: true
quirks:
use_multipart_etag: false # multipart ETags differ from AWS

View File

@@ -1,19 +0,0 @@
name: ArvanCloud
description: Arvan Cloud Object Storage (AOS)
endpoint:
s3.ir-thr-at1.arvanstorage.ir: |-
The default endpoint - a good choice if you are unsure.
Tehran Iran (Simin)
s3.ir-tbz-sh1.arvanstorage.ir: Tabriz Iran (Shahriar)
location_constraint:
ir-thr-at1: Tehran Iran (Simin)
ir-tbz-sh1: Tabriz Iran (Shahriar)
acl: {}
storage_class:
STANDARD: Standard storage class
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_already_exists: false

View File

@@ -1,20 +0,0 @@
name: Ceph
description: Ceph Object Storage
region: {}
endpoint: {}
location_constraint: {}
acl: {}
server_side_encryption:
'': None
AES256: AES256
aws:kms: aws:kms
bucket_acl: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false

View File

@@ -1,98 +0,0 @@
name: ChinaMobile
description: China Mobile Ecloud Elastic Object Storage (EOS)
endpoint:
eos-wuxi-1.cmecloud.cn: |-
The default endpoint - a good choice if you are unsure.
East China (Suzhou)
eos-jinan-1.cmecloud.cn: East China (Jinan)
eos-ningbo-1.cmecloud.cn: East China (Hangzhou)
eos-shanghai-1.cmecloud.cn: East China (Shanghai-1)
eos-zhengzhou-1.cmecloud.cn: Central China (Zhengzhou)
eos-hunan-1.cmecloud.cn: Central China (Changsha-1)
eos-zhuzhou-1.cmecloud.cn: Central China (Changsha-2)
eos-guangzhou-1.cmecloud.cn: South China (Guangzhou-2)
eos-dongguan-1.cmecloud.cn: South China (Guangzhou-3)
eos-beijing-1.cmecloud.cn: North China (Beijing-1)
eos-beijing-2.cmecloud.cn: North China (Beijing-2)
eos-beijing-4.cmecloud.cn: North China (Beijing-3)
eos-huhehaote-1.cmecloud.cn: North China (Huhehaote)
eos-chengdu-1.cmecloud.cn: Southwest China (Chengdu)
eos-chongqing-1.cmecloud.cn: Southwest China (Chongqing)
eos-guiyang-1.cmecloud.cn: Southwest China (Guiyang)
eos-xian-1.cmecloud.cn: Northwest China (Xian)
eos-yunnan.cmecloud.cn: Yunnan China (Kunming)
eos-yunnan-2.cmecloud.cn: Yunnan China (Kunming-2)
eos-tianjin-1.cmecloud.cn: Tianjin China (Tianjin)
eos-jilin-1.cmecloud.cn: Jilin China (Changchun)
eos-hubei-1.cmecloud.cn: Hubei China (Xiangyan)
eos-jiangxi-1.cmecloud.cn: Jiangxi China (Nanchang)
eos-gansu-1.cmecloud.cn: Gansu China (Lanzhou)
eos-shanxi-1.cmecloud.cn: Shanxi China (Taiyuan)
eos-liaoning-1.cmecloud.cn: Liaoning China (Shenyang)
eos-hebei-1.cmecloud.cn: Hebei China (Shijiazhuang)
eos-fujian-1.cmecloud.cn: Fujian China (Xiamen)
eos-guangxi-1.cmecloud.cn: Guangxi China (Nanning)
eos-anhui-1.cmecloud.cn: Anhui China (Huainan)
location_constraint:
wuxi1: East China (Suzhou)
jinan1: East China (Jinan)
ningbo1: East China (Hangzhou)
shanghai1: East China (Shanghai-1)
zhengzhou1: Central China (Zhengzhou)
hunan1: Central China (Changsha-1)
zhuzhou1: Central China (Changsha-2)
guangzhou1: South China (Guangzhou-2)
dongguan1: South China (Guangzhou-3)
beijing1: North China (Beijing-1)
beijing2: North China (Beijing-2)
beijing4: North China (Beijing-3)
huhehaote1: North China (Huhehaote)
chengdu1: Southwest China (Chengdu)
chongqing1: Southwest China (Chongqing)
guiyang1: Southwest China (Guiyang)
xian1: Northwest China (Xian)
yunnan: Yunnan China (Kunming)
yunnan2: Yunnan China (Kunming-2)
tianjin1: Tianjin China (Tianjin)
jilin1: Jilin China (Changchun)
hubei1: Hubei China (Xiangyan)
jiangxi1: Jiangxi China (Nanchang)
gansu1: Gansu China (Lanzhou)
shanxi1: Shanxi China (Taiyuan)
liaoning1: Liaoning China (Shenyang)
hebei1: Hebei China (Shijiazhuang)
fujian1: Fujian China (Xiamen)
guangxi1: Guangxi China (Nanning)
anhui1: Anhui China (Huainan)
acl:
private: |-
Owner gets FULL_CONTROL.
No one else has access rights (default).
public-read: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ access.
public-read-write: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ and WRITE access.
Granting this on a bucket is generally not recommended.
authenticated-read: |-
Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.
storage_class:
'': Default
STANDARD: Standard storage class
GLACIER: Archive storage mode
STANDARD_IA: Infrequent access storage mode
server_side_encryption:
'': None
AES256: AES256
bucket_acl: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_already_exists: false

View File

@@ -1,8 +0,0 @@
name: Cloudflare
description: Cloudflare R2 Storage
region:
auto: R2 buckets are automatically distributed across Cloudflare's data centers for low latency.
endpoint: {}
quirks:
force_path_style: true
use_multipart_etag: false # multipart ETags are random

View File

@@ -1,10 +0,0 @@
name: Cubbit
description: Cubbit DS3 Object Storage
region:
eu-west-1: Europe West
endpoint:
s3.cubbit.eu: Cubbit DS3 Object Storage endpoint
acl: {}
bucket_acl: true
quirks:
use_multipart_etag: false

View File

@@ -1,20 +0,0 @@
name: DigitalOcean
description: DigitalOcean Spaces
region: {}
endpoint:
syd1.digitaloceanspaces.com: DigitalOcean Spaces Sydney 1
sfo3.digitaloceanspaces.com: DigitalOcean Spaces San Francisco 3
sfo2.digitaloceanspaces.com: DigitalOcean Spaces San Francisco 2
fra1.digitaloceanspaces.com: DigitalOcean Spaces Frankfurt 1
nyc3.digitaloceanspaces.com: DigitalOcean Spaces New York 3
ams3.digitaloceanspaces.com: DigitalOcean Spaces Amsterdam 3
sgp1.digitaloceanspaces.com: DigitalOcean Spaces Singapore 1
lon1.digitaloceanspaces.com: DigitalOcean Spaces London 1
tor1.digitaloceanspaces.com: DigitalOcean Spaces Toronto 1
blr1.digitaloceanspaces.com: DigitalOcean Spaces Bangalore 1
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
list_url_encode: false
use_already_exists: false

View File

@@ -1,11 +0,0 @@
name: Dreamhost
description: Dreamhost DreamObjects
region: {}
endpoint:
objects-us-east-1.dream.io: Dream Objects endpoint
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
list_url_encode: false
use_already_exists: false

View File

@@ -1,9 +0,0 @@
name: Exaba
description: Exaba Object Storage
region: {}
endpoint: {}
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
force_path_style: true

View File

@@ -1,21 +0,0 @@
name: FileLu
description: FileLu S5 (S3-Compatible Object Storage)
region:
global: Global
us-east: North America (US-East)
eu-central: Europe (EU-Central)
ap-southeast: Asia Pacific (AP-Southeast)
me-central: Middle East (ME-Central)
endpoint:
s5lu.com: Global FileLu S5 endpoint
us.s5lu.com: North America (US-East) region endpoint
eu.s5lu.com: Europe (EU-Central) region endpoint
ap.s5lu.com: Asia Pacific (AP-Southeast) region endpoint
me.s5lu.com: Middle East (ME-Central) region endpoint
acl: {}
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_multipart_etag: false

View File

@@ -1,6 +0,0 @@
name: FlashBlade
description: Pure Storage FlashBlade Object Storage
endpoint: {}
quirks:
might_gzip: false # never auto-gzip
force_path_style: true # supports vhost but defaults to path-style

View File

@@ -1,20 +0,0 @@
name: GCS
description: Google Cloud Storage
region: {}
endpoint:
https://storage.googleapis.com: Google Cloud Storage endpoint
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
# Google breaks the request signature by mutating the accept-encoding HTTP header
# https://github.com/rclone/rclone/issues/6670
use_accept_encoding_gzip: false
sign_accept_encoding: false
use_already_exists: true # returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
# GCS doesn't like the x-id URL parameter the SDKv2 inserts
use_x_id: false
# GCS S3 doesn't support multi-part server side copy:
# See: https://issuetracker.google.com/issues/323465186
# So make cutoff very large which it does seem to support
copy_cutoff: 9223372036854775807

View File

@@ -1,15 +0,0 @@
name: Hetzner
description: Hetzner Object Storage
region:
hel1: Helsinki
fsn1: Falkenstein
nbg1: Nuremberg
endpoint:
hel1.your-objectstorage.com: Helsinki
fsn1.your-objectstorage.com: Falkenstein
nbg1.your-objectstorage.com: Nuremberg
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
use_already_exists: false

View File

@@ -1,41 +0,0 @@
name: HuaweiOBS
description: Huawei Object Storage Service
region:
af-south-1: AF-Johannesburg
ap-southeast-2: AP-Bangkok
ap-southeast-3: AP-Singapore
cn-east-3: CN East-Shanghai1
cn-east-2: CN East-Shanghai2
cn-north-1: CN North-Beijing1
cn-north-4: CN North-Beijing4
cn-south-1: CN South-Guangzhou
ap-southeast-1: CN-Hong Kong
sa-argentina-1: LA-Buenos Aires1
sa-peru-1: LA-Lima1
na-mexico-1: LA-Mexico City1
sa-chile-1: LA-Santiago2
sa-brazil-1: LA-Sao Paulo1
ru-northwest-2: RU-Moscow2
endpoint:
obs.af-south-1.myhuaweicloud.com: AF-Johannesburg
obs.ap-southeast-2.myhuaweicloud.com: AP-Bangkok
obs.ap-southeast-3.myhuaweicloud.com: AP-Singapore
obs.cn-east-3.myhuaweicloud.com: CN East-Shanghai1
obs.cn-east-2.myhuaweicloud.com: CN East-Shanghai2
obs.cn-north-1.myhuaweicloud.com: CN North-Beijing1
obs.cn-north-4.myhuaweicloud.com: CN North-Beijing4
obs.cn-south-1.myhuaweicloud.com: CN South-Guangzhou
obs.ap-southeast-1.myhuaweicloud.com: CN-Hong Kong
obs.sa-argentina-1.myhuaweicloud.com: LA-Buenos Aires1
obs.sa-peru-1.myhuaweicloud.com: LA-Lima1
obs.na-mexico-1.myhuaweicloud.com: LA-Mexico City1
obs.sa-chile-1.myhuaweicloud.com: LA-Santiago2
obs.sa-brazil-1.myhuaweicloud.com: LA-Sao Paulo1
obs.ru-northwest-2.myhuaweicloud.com: RU-Moscow2
acl: {}
bucket_acl: true
quirks:
# Huawei OBS PFS does not support listObjectsV2, and if urlEncodeListing is turned on, the marker will not work and listing will keep returning the same page forever.
list_url_encode: false
list_version: 1
use_already_exists: false

View File

@@ -1,126 +0,0 @@
name: IBMCOS
description: IBM COS S3
region: {}
endpoint:
s3.us.cloud-object-storage.appdomain.cloud: US Cross Region Endpoint
s3.dal.us.cloud-object-storage.appdomain.cloud: US Cross Region Dallas Endpoint
s3.wdc.us.cloud-object-storage.appdomain.cloud: US Cross Region Washington DC Endpoint
s3.sjc.us.cloud-object-storage.appdomain.cloud: US Cross Region San Jose Endpoint
s3.private.us.cloud-object-storage.appdomain.cloud: US Cross Region Private Endpoint
s3.private.dal.us.cloud-object-storage.appdomain.cloud: US Cross Region Dallas Private Endpoint
s3.private.wdc.us.cloud-object-storage.appdomain.cloud: US Cross Region Washington DC Private Endpoint
s3.private.sjc.us.cloud-object-storage.appdomain.cloud: US Cross Region San Jose Private Endpoint
s3.us-east.cloud-object-storage.appdomain.cloud: US Region East Endpoint
s3.private.us-east.cloud-object-storage.appdomain.cloud: US Region East Private Endpoint
s3.us-south.cloud-object-storage.appdomain.cloud: US Region South Endpoint
s3.private.us-south.cloud-object-storage.appdomain.cloud: US Region South Private Endpoint
s3.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Endpoint
s3.fra.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Frankfurt Endpoint
s3.mil.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Milan Endpoint
s3.ams.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Amsterdam Endpoint
s3.private.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Private Endpoint
s3.private.fra.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Frankfurt Private Endpoint
s3.private.mil.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Milan Private Endpoint
s3.private.ams.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Amsterdam Private Endpoint
s3.eu-gb.cloud-object-storage.appdomain.cloud: Great Britain Endpoint
s3.private.eu-gb.cloud-object-storage.appdomain.cloud: Great Britain Private Endpoint
s3.eu-de.cloud-object-storage.appdomain.cloud: EU Region DE Endpoint
s3.private.eu-de.cloud-object-storage.appdomain.cloud: EU Region DE Private Endpoint
s3.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Endpoint
s3.tok.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Tokyo Endpoint
s3.hkg.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Hong Kong Endpoint
s3.seo.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Seoul Endpoint
s3.private.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Private Endpoint
s3.private.tok.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Tokyo Private Endpoint
s3.private.hkg.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Hong Kong Private Endpoint
s3.private.seo.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Seoul Private Endpoint
s3.jp-tok.cloud-object-storage.appdomain.cloud: APAC Region Japan Endpoint
s3.private.jp-tok.cloud-object-storage.appdomain.cloud: APAC Region Japan Private Endpoint
s3.au-syd.cloud-object-storage.appdomain.cloud: APAC Region Australia Endpoint
s3.private.au-syd.cloud-object-storage.appdomain.cloud: APAC Region Australia Private Endpoint
s3.ams03.cloud-object-storage.appdomain.cloud: Amsterdam Single Site Endpoint
s3.private.ams03.cloud-object-storage.appdomain.cloud: Amsterdam Single Site Private Endpoint
s3.che01.cloud-object-storage.appdomain.cloud: Chennai Single Site Endpoint
s3.private.che01.cloud-object-storage.appdomain.cloud: Chennai Single Site Private Endpoint
s3.mel01.cloud-object-storage.appdomain.cloud: Melbourne Single Site Endpoint
s3.private.mel01.cloud-object-storage.appdomain.cloud: Melbourne Single Site Private Endpoint
s3.osl01.cloud-object-storage.appdomain.cloud: Oslo Single Site Endpoint
s3.private.osl01.cloud-object-storage.appdomain.cloud: Oslo Single Site Private Endpoint
s3.tor01.cloud-object-storage.appdomain.cloud: Toronto Single Site Endpoint
s3.private.tor01.cloud-object-storage.appdomain.cloud: Toronto Single Site Private Endpoint
s3.seo01.cloud-object-storage.appdomain.cloud: Seoul Single Site Endpoint
s3.private.seo01.cloud-object-storage.appdomain.cloud: Seoul Single Site Private Endpoint
s3.mon01.cloud-object-storage.appdomain.cloud: Montreal Single Site Endpoint
s3.private.mon01.cloud-object-storage.appdomain.cloud: Montreal Single Site Private Endpoint
s3.mex01.cloud-object-storage.appdomain.cloud: Mexico Single Site Endpoint
s3.private.mex01.cloud-object-storage.appdomain.cloud: Mexico Single Site Private Endpoint
s3.sjc04.cloud-object-storage.appdomain.cloud: San Jose Single Site Endpoint
s3.private.sjc04.cloud-object-storage.appdomain.cloud: San Jose Single Site Private Endpoint
s3.mil01.cloud-object-storage.appdomain.cloud: Milan Single Site Endpoint
s3.private.mil01.cloud-object-storage.appdomain.cloud: Milan Single Site Private Endpoint
s3.hkg02.cloud-object-storage.appdomain.cloud: Hong Kong Single Site Endpoint
s3.private.hkg02.cloud-object-storage.appdomain.cloud: Hong Kong Single Site Private Endpoint
s3.par01.cloud-object-storage.appdomain.cloud: Paris Single Site Endpoint
s3.private.par01.cloud-object-storage.appdomain.cloud: Paris Single Site Private Endpoint
s3.sng01.cloud-object-storage.appdomain.cloud: Singapore Single Site Endpoint
s3.private.sng01.cloud-object-storage.appdomain.cloud: Singapore Single Site Private Endpoint
location_constraint:
us-standard: US Cross Region Standard
us-vault: US Cross Region Vault
us-cold: US Cross Region Cold
us-flex: US Cross Region Flex
us-east-standard: US East Region Standard
us-east-vault: US East Region Vault
us-east-cold: US East Region Cold
us-east-flex: US East Region Flex
us-south-standard: US South Region Standard
us-south-vault: US South Region Vault
us-south-cold: US South Region Cold
us-south-flex: US South Region Flex
eu-standard: EU Cross Region Standard
eu-vault: EU Cross Region Vault
eu-cold: EU Cross Region Cold
eu-flex: EU Cross Region Flex
eu-gb-standard: Great Britain Standard
eu-gb-vault: Great Britain Vault
eu-gb-cold: Great Britain Cold
eu-gb-flex: Great Britain Flex
ap-standard: APAC Standard
ap-vault: APAC Vault
ap-cold: APAC Cold
ap-flex: APAC Flex
mel01-standard: Melbourne Standard
mel01-vault: Melbourne Vault
mel01-cold: Melbourne Cold
mel01-flex: Melbourne Flex
tor01-standard: Toronto Standard
tor01-vault: Toronto Vault
tor01-cold: Toronto Cold
tor01-flex: Toronto Flex
acl:
private: |-
Owner gets FULL_CONTROL.
No one else has access rights (default).
This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.
public-read: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ access.
This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS.
public-read-write: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ and WRITE access.
This acl is available on IBM Cloud (Infra), On-Premise IBM COS.
authenticated-read: |-
Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.
Not supported on Buckets.
This acl is available on IBM Cloud (Infra) and On-Premise IBM COS.
ibm_api_key: true
ibm_resource_instance_id: true
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_multipart_etag: false
use_already_exists: false # returns BucketAlreadyExists

View File

@@ -1,7 +0,0 @@
name: IDrive
description: IDrive e2
acl: {}
bucket_acl: true
quirks:
force_path_style: true
use_already_exists: false

View File

@@ -1,17 +0,0 @@
name: IONOS
description: IONOS Cloud
region:
de: Frankfurt, Germany
eu-central-2: Berlin, Germany
eu-south-2: Logrono, Spain
endpoint:
s3-eu-central-1.ionoscloud.com: Frankfurt, Germany
s3-eu-central-2.ionoscloud.com: Berlin, Germany
s3-eu-south-2.ionoscloud.com: Logrono, Spain
acl: {}
bucket_acl: true
quirks:
# listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
force_path_style: true
list_url_encode: false
use_already_exists: false

Some files were not shown because too many files have changed in this diff