Mirror of <https://github.com/rclone/rclone.git>, synced 2025-12-06 00:03:32 +00:00.

Compare commits: 133 commits
The comparison spans the following 133 commit SHA1s:

be2c44f5af, 1db0f51be4, 6440052fbd, 4afb59bc93, 0343670375, 5b2b372ba9,
08c35ae741, ecea0cd6f9, 80e6389a50, a3ccf4d8a0, 31df39d356, 03d3811f7f,
83b83f7768, 71138082ea, cf94824426, 16971ab6b9, 9f75af38e3, b5e4d39b05,
4d19afdbbf, 2ebfedce85, 1a4b85b6e7, 5052b80298, fada870ff0, 38f456c527,
e6d82ac6ee, 4c74ded85a, 43848f5c42, fb895f69a1, b204090325, 1821d86911,
7ce67347fb, 0228bbff39, 6890bd7738, bc5d1dfaf3, c33aeb705f, 12cf8e71df,
ec5ddb68a8, 8335596207, 4f56ab2341, 8b5b7ecfd9, 2aa2cfc70e, 7265b2331f,
0dd56ff2a3, 2443cb284e, 0f3aa17fb6, 8f74e7d331, ee92673e1b, 55655efabf,
700e6e11fd, edb47076b5, e5fd97b8d2, bc57a31859, 4adb48fbbc, c41d0f7d3a,
d34ba258b0, 05d54a95b8, f16b39165b, 86edb26fd5, 203e1bdbf9, a522c056fe,
31adc7d89f, c559ab7c58, 80610ef774, a6c943a1ad, 53e0dbb5cb, 3a0000526b,
1fa6941e26, 9bb7ad31e6, da8c6847ad, d240d044c3, 1056ace80f, a06c1c0cb7,
7672c3d586, f361cdf1cb, 26d3c71bab, c76396f03c, 059ad47336, becc068d36,
94deb6bd6f, cc09978b79, 409dc75328, fb30c5f8dd, 203df6cc58, 459e10d599,
1ba4fd1d83, 77553b8dd5, 5420dbbe38, 87b71dd6b9, a0bcdc2638, e42fa9f92d,
4586104dc7, c4c360a285, ce4860b9b6, ed87f82d21, 0a82929b94, 1e8ee3b813,
eaab3f5271, 25b05f1210, 2dc1b07863, 49acacec2e, 70d2fe6568, f28c83c6de,
2cf44e584c, bba9027817, 51859af8d9, 4f60f8915d, 6663eb346f, 1d0e1ea0b5,
71631621c4, 31e904d84c, 30c9843e3d, c8a834f0e8, b272c50c4c, b8700e8042,
73193b0565, c4eef3065f, ba2a642961, 979c6a573d, bbb866018e, 7706f02294,
6df7913181, c079495d1f, 3bf1ac5b07, 091caa34c6, d507e9be39, 40b3251e41,
484d955ea8, 8fa9f255a0, e7f11af1ca, 0b5c4cc442, 178ddafdc7, ad316ec6e3,
61b022dfc3
.github/workflows/build.yml (vendored, 14 changes)
````diff
@@ -239,13 +239,13 @@ jobs:
           restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

       - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v8
+        uses: golangci/golangci-lint-action@v9
         with:
           version: latest
           skip-cache: true

       - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v8
+        uses: golangci/golangci-lint-action@v9
         env:
           GOOS: "windows"
         with:
@@ -253,7 +253,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v8
+        uses: golangci/golangci-lint-action@v9
         env:
           GOOS: "darwin"
         with:
@@ -261,7 +261,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v8
+        uses: golangci/golangci-lint-action@v9
         env:
           GOOS: "freebsd"
         with:
@@ -269,7 +269,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v8
+        uses: golangci/golangci-lint-action@v9
         env:
           GOOS: "openbsd"
         with:
@@ -291,7 +291,9 @@ jobs:
             README.md
             RELEASE.md
             CODE_OF_CONDUCT.md
-            docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
+            librclone\README.md
+            backend\s3\README.md
+            docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md

       - name: Scan edits of autogenerated files
         run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
````
From the Docker image publishing workflow:

````diff
@@ -183,7 +183,7 @@ jobs:
           touch "/tmp/digests/${digest#sha256:}"

       - name: Upload Image Digest
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v5
         with:
           name: digests-${{ env.PLATFORM }}
           path: /tmp/digests/*
@@ -198,7 +198,7 @@ jobs:

     steps:
       - name: Download Image Digests
-        uses: actions/download-artifact@v5
+        uses: actions/download-artifact@v6
         with:
           path: /tmp/digests
           pattern: digests-*
````
From the golangci-lint configuration:

````diff
@@ -19,6 +19,11 @@ linters:
     - unconvert
   # Configure checks. Mostly using defaults but with some commented exceptions.
   settings:
+    govet:
+      enable-all: true
+      disable:
+        - fieldalignment
+        - shadow
     staticcheck:
       # With staticcheck there is only one setting, so to extend the implicit
       # default value it must be explicitly included.
````
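The hunk enables every `govet` analyzer, then opts back out of `fieldalignment` and `shadow`. As a hedged illustration (not taken from the diff, just the standard behaviour of these two analyzers), this is the kind of code they would otherwise flag:

````go
package example

import "errors"

// fieldalignment would suggest reordering these fields (b, a, c) so that
// the bool fields pack together and the struct needs less padding.
type padded struct {
    a bool  // 1 byte + 7 bytes padding before b
    b int64 // 8 bytes
    c bool  // 1 byte + 7 bytes trailing padding
}

func shadowed() error {
    err := errors.New("outer")
    if true {
        // shadow would flag this inner err, which hides the outer one.
        err := errors.New("inner")
        _ = err
    }
    return err
}
````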
From the markdownlint configuration:

````diff
@@ -41,3 +41,32 @@ single-title: # MD025
 # Markdown files we must use whatever works in the final HTML generated docs.
 # Suppress Markdownlint warning: Link fragments should be valid.
 link-fragments: false # MD051
+
+# Restrict the languages and language identifiers to use for code blocks.
+# We only want those supported by both Hugo and GitHub. These are documented
+# here:
+# https://gohugo.io/content-management/syntax-highlighting/#languages
+# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
+# In addition, we only want to allow identifiers (aliases) that correspond to
+# the same language in Hugo and GitHub, and preferably also VSCode and other
+# commonly used tools, to avoid confusion. An example of this is that "shell"
+# is by some considered an identifier for shell scripts, i.e. an alias for
+# "sh", while others consider it an identifier for shell sessions, i.e. an
+# alias for "console". Although Hugo and GitHub in this case are consistent and
+# have chosen the former, using "sh" instead, and not allowing use of "shell",
+# avoids the confusion entirely.
+fenced-code-language: # MD040
+  allowed_languages:
+    - text
+    - console
+    - sh
+    - bat
+    - ini
+    - json
+    - yaml
+    - go
+    - python
+    - c++
+    - c#
+    - java
+    - powershell
````
CONTRIBUTING.md (134 changes)
````diff
@@ -38,7 +38,7 @@ and [email](https://docs.github.com/en/github/setting-up-and-managing-your-githu
 Next open your terminal, change directory to your preferred folder and initialise
 your local rclone project:

-```sh
+```console
 git clone https://github.com/rclone/rclone.git
 cd rclone
 git remote rename origin upstream
@@ -53,13 +53,13 @@ executed from the rclone folder created above.

 Now [install Go](https://golang.org/doc/install) and verify your installation:

-```sh
+```console
 go version
 ```

 Great, you can now compile and execute your own version of rclone:

-```sh
+```console
 go build
 ./rclone version
 ```
@@ -68,7 +68,7 @@ go build
 more accurate version number in the executable as well as enable you to specify
 more build options.) Finally make a branch to add your new feature

-```sh
+```console
 git checkout -b my-new-feature
 ```

@@ -80,7 +80,7 @@ and a quick view on the rclone [code organisation](#code-organisation).
 When ready - test the affected functionality and run the unit tests for the
 code you changed

-```sh
+```console
 cd folder/with/changed/files
 go test -v
 ```
@@ -99,7 +99,7 @@ Make sure you

 When you are done with that push your changes to GitHub:

-```sh
+```console
 git push -u origin my-new-feature
 ```

@@ -119,7 +119,7 @@ or [squash your commits](#squashing-your-commits).

 Follow the guideline for [commit messages](#commit-messages) and then:

-```sh
+```console
 git checkout my-new-feature # To switch to your branch
 git status # To see the new and changed files
 git add FILENAME # To select FILENAME for the commit
@@ -130,7 +130,7 @@ git log # To verify the commit. Use q to quit the log

 You can modify the message or changes in the latest commit using:

-```sh
+```console
 git commit --amend
 ```

@@ -145,7 +145,7 @@ pushed to GitHub.

 Your previously pushed commits are replaced by:

-```sh
+```console
 git push --force origin my-new-feature
 ```

@@ -154,7 +154,7 @@ git push --force origin my-new-feature
 To base your changes on the latest version of the
 [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):

-```sh
+```console
 git checkout master
 git fetch upstream
 git merge --ff-only
@@ -170,7 +170,7 @@ If you rebase commits that have been pushed to GitHub, then you will have to

 To combine your commits into one commit:

-```sh
+```console
 git log # To count the commits to squash, e.g. the last 2
 git reset --soft HEAD~2 # To undo the 2 latest commits
 git status # To check everything is as expected
@@ -178,13 +178,13 @@ git status # To check everything is as expected

 If everything is fine, then make the new combined commit:

-```sh
+```console
 git commit # To commit the undone commits as one
 ```

 otherwise, you may roll back using:

-```sh
+```console
 git reflog # To check that HEAD{1} is your previous state
 git reset --soft 'HEAD@{1}' # To roll back to your previous state
 ```
@@ -219,13 +219,13 @@ to check an error return).
 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.

-```sh
+```console
 go test -v ./...
 ```

 You can also use `make`, if supported by your platform

-```sh
+```console
 make quicktest
 ```

@@ -246,7 +246,7 @@ need to make a remote called `TestDrive`.
 You can then run the unit tests in the drive directory. These tests
 are skipped if `TestDrive:` isn't defined.

-```sh
+```console
 cd backend/drive
 go test -v
 ```
@@ -255,7 +255,7 @@ You can then run the integration tests which test all of rclone's
 operations. Normally these get run against the local file system,
 but they can be run against any of the remotes.

-```sh
+```console
 cd fs/sync
 go test -v -remote TestDrive:
 go test -v -remote TestDrive: -fast-list
@@ -268,9 +268,8 @@ If you want to use the integration test framework to run these tests
 altogether with an HTML report and test retries then from the
 project root:

-```sh
-go install github.com/rclone/rclone/fstest/test_all
-test_all -backends drive
+```console
+go run ./fstest/test_all -backends drive
 ```

 ### Full integration testing
@@ -278,19 +277,19 @@ test_all -backends drive
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run

-```sh
-make check
+```console
+make test
 ```

 The commands may require some extra go packages which you can install with

-```sh
+```console
 make build_dep
 ```

 The full integration tests are run daily on the integration test server. You can
-find the results at <https://pub.rclone.org/integration-tests/>
+find the results at <https://integration.rclone.org>

 ## Code Organisation

@@ -349,11 +348,13 @@ If you are adding a new feature then please update the documentation.

 The documentation sources are generally in Markdown format, in conformance
 with the CommonMark specification and compatible with GitHub Flavored
-Markdown (GFM). The markdown format is checked as part of the lint operation
-that runs automatically on pull requests, to enforce standards and consistency.
-This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
-tool, which can also be integrated into editors so you can perform the same
-checks while writing.
+Markdown (GFM). The markdown format and style is checked as part of the lint
+operation that runs automatically on pull requests, to enforce standards and
+consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
+tool by David Anson, which can also be integrated into editors so you can
+perform the same checks while writing. It generally follows Ciro Santilli's
+[Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which
+is a good source if you want to know more.

 HTML pages, served as website <rclone.org>, are generated from the Markdown,
 using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
@@ -382,7 +383,7 @@ If you add a new general flag (not for a backend), then document it in
 alphabetical order.

 If you add a new backend option/flag, then it should be documented in
-the source file in the `Help:` field.
+the source file in the `Help:` field:

 - Start with the most important information about the option,
   as a single sentence on a single line.
@@ -404,6 +405,30 @@ the source file in the `Help:` field.
   as an unordered list, therefore a single line break is enough to
   create a new list item. Also, for enumeration texts like name of
   countries, it looks better without an ending period/full stop character.
+- You can run `make backenddocs` to verify the resulting Markdown.
+  - This will update the autogenerated sections of the backend docs Markdown
+    files under `docs/content`.
+  - It requires you to have [Python](https://www.python.org) installed.
+  - The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
+    and you can also run this directly, optionally with the name of a backend
+    as argument to only update the docs for a specific backend.
+  - **Do not** commit the updated Markdown files. This operation is run as part of
+    the release process. Since any manual changes in the autogenerated sections
+    of the Markdown files will then be lost, we have a pull request check that
+    reports an error for any changes within the autogenerated sections. Should you
+    have done manual changes outside of the autogenerated sections they must be
+    committed, of course.
+- You can run `make serve` to verify the resulting website.
+  - This will build the website and serve it locally, so you can open it in
+    your web browser and verify that the end result looks OK. Check specifically
+    any added links, also in light of the note above regarding different algorithms
+    for generated header anchors.
+  - It requires you to have the [Hugo](https://gohugo.io) tool available.
+  - The `serve` make target depends on the `website` target, which runs the
+    `hugo` command from the `docs` directory to build the website, and then
+    it serves the website locally with an embedded web server using a command
+    `hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you
+    can run similar Hugo commands directly as well.

 When writing documentation for an entirely new backend,
 see [backend documentation](#backend-documentation).
@@ -420,6 +445,11 @@ for small changes in the docs which makes it very easy. Just remember the
 caveat when linking to header anchors, noted above, which means that GitHub's
 Markdown preview may not be an entirely reliable verification of the results.

+After your changes have been merged, you can verify them on
+[tip.rclone.org](https://tip.rclone.org). This site is updated daily with the
+current state of the master branch at 07:00 UTC. The changes will be on the main
+[rclone.org](https://rclone.org) site once they have been included in a release.
+
 ## Making a release

 There are separate instructions for making a release in the RELEASE.md
@@ -478,7 +508,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
 instructions below. These will fetch the dependency and add it to
 `go.mod` and `go.sum`.

-```sh
+```console
 go get github.com/ncw/new_dependency
 ```

@@ -492,7 +522,7 @@ and `go.sum` in the same commit as your other changes.

 If you need to update a dependency then run

-```sh
+```console
 go get golang.org/x/crypto
 ```

@@ -581,8 +611,7 @@ remote or an fs.
 - Add your backend to `fstest/test_all/config.yaml`
 - Once you've done that then you can use the integration test framework from
   the project root:
-  - go install ./...
-  - test_all -backends remote
+  - `go run ./fstest/test_all -backends remote`

 Or if you want to run the integration tests manually:

@@ -621,44 +650,7 @@ in the web browser and the links (internal and external) all work.

 ## Adding a new s3 provider

-It is quite easy to add a new S3 provider to rclone.
-
-You'll need to modify the following files
-
-- `backend/s3/s3.go`
-  - Add the provider to `providerOption` at the top of the file
-  - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
-  - Add the provider to the `setQuirks` function - see the documentation there.
-- `docs/content/s3.md`
-  - Add the provider at the top of the page.
-  - Add a section about the provider linked from there.
-  - Make sure this is in alphabetical order in the `Providers` section.
-  - Add a transcript of a trial `rclone config` session
-  - Edit the transcript to remove things which might change in subsequent versions
-  - **Do not** alter or add to the autogenerated parts of `s3.md`
-  - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
-- `README.md` - this is the home page in github
-  - Add the provider and a link to the section you wrote in `docs/contents/s3.md`
-- `docs/content/_index.md` - this is the home page of rclone.org
-  - Add the provider and a link to the section you wrote in `docs/contents/s3.md`
-
-When adding the provider, endpoints, quirks, docs etc keep them in
-alphabetical order by `Provider` name, but with `AWS` first and
-`Other` last.
-
-Once you've written the docs, run `make serve` and check they look OK
-in the web browser and the links (internal and external) all work.
-
-Once you've written the code, test `rclone config` works to your
-satisfaction, and check the integration tests work `go test -v -remote
-NewS3Provider:`. You may need to adjust the quirks to get them to
-pass. Some providers just can't pass the tests with control characters
-in the names so if these fail and the provider doesn't support
-`urlEncodeListings` in the quirks then ignore them. Note that the
-`SetTier` test may also fail on non AWS providers.
-
-For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
+[Please see the guide in the S3 backend directory](backend/s3/README.md).

 ## Writing a plugin
````
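To make the `Help:` conventions from the CONTRIBUTING.md hunk above concrete, here is a hypothetical backend option. This is a sketch: the option name and help text are invented, only the `fs.Option` fields are taken from rclone's `fs` package.

````go
package example

import "github.com/rclone/rclone/fs"

// exampleOption is a hypothetical backend option illustrating the Help
// conventions described above: the most important information first, as a
// single sentence on a single line, with further detail in later paragraphs.
var exampleOption = fs.Option{
    Name: "strict_paths",
    Help: `Enable strict checking of remote paths.

The first line above is the short summary shown in config listings.
Additional paragraphs, separated by blank lines, appear only in the
full generated documentation.`,
    Default:  false,
    Advanced: true,
}
````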
From the storage provider list:

````diff
@@ -34,6 +34,7 @@ directories to and from different cloud storage providers.
 - China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
 - Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
 - Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
+- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
 - DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 - Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
 - Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
@@ -106,6 +107,7 @@ directories to and from different cloud storage providers.
 - Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
 - SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
 - Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
+- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
 - SFTP [:page_facing_up:](https://rclone.org/sftp/)
 - SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 - Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
@@ -129,6 +131,7 @@ Please see [the full list of all storage providers and their features](https://r
 These backends adapt or modify other storage providers

 - Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
+- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
 - Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
 - Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
 - Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
````
RELEASE.md (22 changes)
````diff
@@ -60,7 +60,7 @@ If `make updatedirect` added a `toolchain` directive then remove it.
 We don't want to force a toolchain on our users. Linux packagers are
 often using a version of Go that is a few versions out of date.

-```sh
+```console
 go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
 go get -d $(cat /tmp/potential-upgrades)
 go mod tidy -go=1.22 -compat=1.22
@@ -70,7 +70,7 @@ If the `go mod tidy` fails use the output from it to remove the
 package which can't be upgraded from `/tmp/potential-upgrades` when
 done

-```sh
+```console
 git co go.mod go.sum
 ```

@@ -102,7 +102,7 @@ The above procedure will not upgrade major versions, so v2 to v3.
 However this tool can show which major versions might need to be
 upgraded:

-```sh
+```console
 go run github.com/icholy/gomajor@latest list -major
 ```

@@ -112,7 +112,7 @@ Expect API breakage when updating major versions.

 At some point after the release run

-```sh
+```console
 bin/tidy-beta v1.55
 ```

@@ -159,7 +159,7 @@ which is a private repo containing artwork from sponsors.

 Create an update website branch based off the last release

-```sh
+```console
 git co -b update-website
 ```

@@ -167,19 +167,19 @@ If the branch already exists, double check there are no commits that need saving

 Now reset the branch to the last release

-```sh
+```console
 git reset --hard v1.64.0
 ```

 Create the changes, check them in, test with `make serve` then

-```sh
+```console
 make upload_test_website
 ```

 Check out <https://test.rclone.org> and when happy

-```sh
+```console
 make upload_website
 ```

@@ -189,14 +189,14 @@ Cherry pick any changes back to master and the stable branch if it is active.

 To do a basic build of rclone's docker image to debug builds locally:

-```sh
+```console
 docker buildx build --load -t rclone/rclone:testing --progress=plain .
 docker run --rm rclone/rclone:testing version
 ```

 To test the multiplatform build

-```sh
+```console
 docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
 ```

@@ -204,6 +204,6 @@ To make a full build then set the tags correctly and add `--push`

 Note that you can't only build one architecture - you need to build them all.

-```sh
+```console
 docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```
````
From the backend registration list (package all):

````diff
@@ -4,6 +4,7 @@ package all
 import (
     // Active file systems
     _ "github.com/rclone/rclone/backend/alias"
+    _ "github.com/rclone/rclone/backend/archive"
     _ "github.com/rclone/rclone/backend/azureblob"
     _ "github.com/rclone/rclone/backend/azurefiles"
     _ "github.com/rclone/rclone/backend/b2"
````
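The added line follows rclone's blank-import registration pattern: importing the backend package for its side effects runs its `init()`, which calls `fs.Register`. A minimal sketch of the pattern, assuming the standard `fs.Find` lookup from rclone's registry:

````go
package main

import (
    "fmt"

    "github.com/rclone/rclone/fs"

    // The blank import runs the backend's init(), which registers it,
    // exactly like the line added to the import list above.
    _ "github.com/rclone/rclone/backend/archive"
)

func main() {
    // After the blank import the backend can be looked up by name.
    info, err := fs.Find("archive")
    if err != nil {
        fmt.Println("backend not registered:", err)
        return
    }
    fmt.Println(info.Name, "-", info.Description)
}
````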
backend/archive/archive.go (new file, 679 lines)
````go
//go:build !plan9

// Package archive implements a backend to access archive files in a remote
package archive

// FIXME factor common code between backends out - eg VFS initialization

// FIXME can we generalize the VFS handle caching and use it in zip backend

// Factor more stuff out if possible

// Odd stats which are probably coming from the VFS
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s

// FIXME this will perform poorly for unpacking as the VFS Reader is bad
// at multiple streams - need cache mode setting?

import (
    "context"
    "errors"
    "fmt"
    "io"
    "path"
    "strings"
    "sync"
    "time"

    // Import all the required archivers here
    _ "github.com/rclone/rclone/backend/archive/squashfs"
    _ "github.com/rclone/rclone/backend/archive/zip"

    "github.com/rclone/rclone/backend/archive/archiver"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
)

// Register with Fs
func init() {
    fsi := &fs.RegInfo{
        Name:        "archive",
        Description: "Read archives",
        NewFs:       NewFs,
        MetadataInfo: &fs.MetadataInfo{
            Help: `Any metadata supported by the underlying remote is read and written.`,
        },
        Options: []fs.Option{{
            Name: "remote",
            Help: `Remote to wrap to read archives from.

Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".

If this is left empty, then the archive backend will use the root as
the remote.

This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
            Required: false,
        }},
    }
    fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
    Remote string `config:"remote"`
}

// Fs represents an archive of upstreams
type Fs struct {
    name     string       // name of this remote
    features *fs.Features // optional features
    opt      Options      // options for this Fs
    root     string       // the path we are working on
    f        fs.Fs        // remote we are wrapping
    wrapper  fs.Fs        // fs that wraps us

    mu       sync.Mutex          // protects the below
    archives map[string]*archive // the archives we have, by path
}

// A single open archive
type archive struct {
    archiver archiver.Archiver // archiver responsible
    remote   string            // path to the archive
    prefix   string            // prefix to add on to listings
    root     string            // root of the archive to remove from listings
    mu       sync.Mutex        // protects the following variables
    f        fs.Fs             // the archive Fs, may be nil
}

// If remote is an archive then return it otherwise return nil
func findArchive(remote string) *archive {
    // FIXME use something faster than linear search?
    for _, archiver := range archiver.Archivers {
        if strings.HasSuffix(remote, archiver.Extension) {
            return &archive{
                archiver: archiver,
                remote:   remote,
                prefix:   remote,
                root:     "",
            }
        }
    }
    return nil
}

// Find an archive buried in remote
func subArchive(remote string) *archive {
    archive := findArchive(remote)
    if archive != nil {
        return archive
    }
    parent := path.Dir(remote)
    if parent == "/" || parent == "." {
        return nil
    }
    return subArchive(parent)
}

// If remote is an archive then return it otherwise return nil
func (f *Fs) findArchive(remote string) (archive *archive) {
    archive = findArchive(remote)
    if archive != nil {
        f.mu.Lock()
        f.archives[remote] = archive
        f.mu.Unlock()
    }
    return archive
}

// Instantiate archive if it hasn't been instantiated yet
//
// This is done lazily so that we can list a directory full of
// archives without opening them all.
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
    a.mu.Lock()
    defer a.mu.Unlock()
    if a.f != nil {
        return a.f, nil
    }
    newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
    if err != nil && err != fs.ErrorIsFile {
        return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
    }
    a.f = newFs
    return a.f, nil
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
    // defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
    // Parse config into Options struct
    opt := new(Options)
    err = configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    remote := opt.Remote
    origRoot := root

    // If remote is empty, use the root instead
    if remote == "" {
        remote = root
        root = ""
    }
    isDirectory := strings.HasSuffix(remote, "/")
    remote = strings.TrimRight(remote, "/")
    if remote == "" {
        remote = "/"
    }
    if strings.HasPrefix(remote, name+":") {
        return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
    }

    _ = isDirectory

    foundArchive := subArchive(remote)
    if foundArchive != nil {
        fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
        // Archive path
        foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
        // Path to the archive
        archiveRemote := remote[:len(foundArchive.remote)]
        // Remote is archive leaf name
        foundArchive.remote = path.Base(archiveRemote)
        foundArchive.prefix = ""
        // Point remote to archive file
        remote = archiveRemote
    }

    // Make sure to remove trailing . referring to the current dir
    if path.Base(root) == "." {
        root = strings.TrimSuffix(root, ".")
    }
    remotePath := fspath.JoinRootPath(remote, root)
    wrappedFs, err := cache.Get(ctx, remotePath)
    if err != fs.ErrorIsFile && err != nil {
        return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
    }

    f := &Fs{
        name: name,
        //root: path.Join(remotePath, root),
        root:     origRoot,
        opt:      *opt,
        f:        wrappedFs,
        archives: make(map[string]*archive),
    }
    cache.PinUntilFinalized(f.f, f)
    // the features here are ones we could support, and they are
    // ANDed with the ones from wrappedFs
    f.features = (&fs.Features{
        CaseInsensitive:         true,
        DuplicateFiles:          false,
        ReadMimeType:            true,
        WriteMimeType:           true,
        CanHaveEmptyDirectories: true,
        BucketBased:             true,
        SetTier:                 true,
        GetTier:                 true,
        ReadMetadata:            true,
        WriteMetadata:           true,
        UserMetadata:            true,
        PartialUploads:          true,
    }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

    if foundArchive != nil {
        fs.Debugf(f, "Root is an archive")
        if err != fs.ErrorIsFile {
            return nil, fmt.Errorf("expecting to find a file at %q", remote)
        }
        return foundArchive.init(ctx, f.f)
    }
    // Correct root if definitely pointing to a file
    if err == fs.ErrorIsFile {
        f.root = path.Dir(f.root)
        if f.root == "." || f.root == "/" {
            f.root = ""
        }
    }
    return f, err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    return fmt.Sprintf("archive root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return f.f.Rmdir(ctx, dir)
}

// Hashes returns the hash sets supported by the underlying remote
func (f *Fs) Hashes() hash.Set {
    return f.f.Hashes()
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return f.f.Mkdir(ctx, dir)
}

// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
    do := f.f.Features().Purge
    if do == nil {
        return fs.ErrorCantPurge
    }
    return do(ctx, dir)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    do := f.f.Features().Copy
    if do == nil {
        return nil, fs.ErrorCantCopy
    }
    // FIXME
    // o, ok := src.(*Object)
    // if !ok {
    //     return nil, fs.ErrorCantCopy
    // }
    return do(ctx, src, remote)
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    do := f.f.Features().Move
    if do == nil {
        return nil, fs.ErrorCantMove
    }
    // FIXME
    // o, ok := src.(*Object)
    // if !ok {
    //     return nil, fs.ErrorCantMove
    // }
    return do(ctx, src, remote)
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
    do := f.f.Features().DirMove
    if do == nil {
        return fs.ErrorCantDirMove
    }
    srcFs, ok := src.(*Fs)
    if !ok {
        fs.Debugf(srcFs, "Can't move directory - not same remote type")
        return fs.ErrorCantDirMove
    }
    return do(ctx, srcFs.f, srcRemote, dstRemote)
}

// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
    do := f.f.Features().ChangeNotify
    if do == nil {
        return
    }
    wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
        // fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
        notifyFunc(path, entryType)
    }
    do(ctx, wrappedNotifyFunc, ch)
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
    do := f.f.Features().DirCacheFlush
    if do != nil {
        do()
    }
}

func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
    var o fs.Object
    var err error
    if stream {
        o, err = f.f.Features().PutStream(ctx, in, src, options...)
    } else {
        o, err = f.f.Put(ctx, in, src, options...)
    }
    if err != nil {
        return nil, err
    }
    return o, nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    o, err := f.NewObject(ctx, src.Remote())
    switch err {
    case nil:
        return o, o.Update(ctx, in, src, options...)
    case fs.ErrorObjectNotFound:
        return f.put(ctx, in, src, false, options...)
    default:
        return nil, err
    }
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    o, err := f.NewObject(ctx, src.Remote())
    switch err {
    case nil:
        return o, o.Update(ctx, in, src, options...)
    case fs.ErrorObjectNotFound:
        return f.put(ctx, in, src, true, options...)
    default:
        return nil, err
    }
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    do := f.f.Features().About
    if do == nil {
        return nil, errors.New("not supported by underlying remote")
    }
    return do(ctx)
}

// Find the Fs for the directory
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
    f.mu.Lock()
    defer f.mu.Unlock()

    subFs = f.f

    // FIXME should do this with a better datastructure like a prefix tree
    // FIXME want to find the longest first otherwise nesting won't work
    dirSlash := dir + "/"
    for archiverRemote, archive := range f.archives {
        subRemote := archiverRemote + "/"
        if strings.HasPrefix(dirSlash, subRemote) {
            subFs, err = archive.init(ctx, f.f)
            if err != nil {
                return nil, err
            }
            break
        }
    }

    return subFs, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    // defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)

    subFs, err := f.findFs(ctx, dir)
    if err != nil {
        return nil, err
    }

    entries, err = subFs.List(ctx, dir)
    if err != nil {
        return nil, err
    }
    for i, entry := range entries {
        // Can only unarchive files
        if o, ok := entry.(fs.Object); ok {
            remote := o.Remote()
            archive := f.findArchive(remote)
            if archive != nil {
                // Overwrite entry with directory
                entries[i] = fs.NewDir(remote, o.ModTime(ctx))
            }
        }
    }
    return entries, nil
}

// NewObject creates a new remote archive file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    dir := path.Dir(remote)
    if dir == "/" || dir == "." {
        dir = ""
    }

    subFs, err := f.findFs(ctx, dir)
    if err != nil {
        return nil, err
    }

    o, err := subFs.NewObject(ctx, remote)
    if err != nil {
        return nil, err
    }
    return o, nil
}

// Precision is the greatest precision of all the archivers
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
    if do := f.f.Features().Shutdown; do != nil {
        return do(ctx)
    }
    return nil
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
    do := f.f.Features().PublicLink
    if do == nil {
        return "", errors.New("PublicLink not supported")
    }
    return do(ctx, remote, expire, unlink)
}

// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    do := f.f.Features().PutUnchecked
    if do == nil {
        return nil, errors.New("can't PutUnchecked")
    }
    o, err := do(ctx, in, src, options...)
    if err != nil {
        return nil, err
    }
    return o, nil
}

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
    if len(dirs) == 0 {
        return nil
    }
    do := f.f.Features().MergeDirs
    if do == nil {
        return errors.New("MergeDirs not supported")
    }
    return do(ctx, dirs)
}

// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
    do := f.f.Features().CleanUp
    if do == nil {
        return errors.New("not supported by underlying remote")
    }
    return do(ctx)
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
    do := f.f.Features().OpenWriterAt
    if do == nil {
        return nil, fs.ErrorNotImplemented
    }
    return do(ctx, remote, size)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
    return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
    return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
    f.wrapper = wrapper
}

// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
    do := f.f.Features().OpenChunkWriter
    if do == nil {
        return info, nil, fs.ErrorNotImplemented
    }
    return do(ctx, remote, src, options...)
}

// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
    do := f.f.Features().UserInfo
    if do == nil {
        return nil, fs.ErrorNotImplemented
    }
    return do(ctx)
}

// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
    do := f.f.Features().Disconnect
    if do == nil {
        return fs.ErrorNotImplemented
    }
    return do(ctx)
}

// Check the interfaces are satisfied
var (
    _ fs.Fs              = (*Fs)(nil)
    _ fs.Purger          = (*Fs)(nil)
    _ fs.PutStreamer     = (*Fs)(nil)
    _ fs.Copier          = (*Fs)(nil)
    _ fs.Mover           = (*Fs)(nil)
    _ fs.DirMover        = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.ChangeNotifier  = (*Fs)(nil)
    _ fs.Abouter         = (*Fs)(nil)
    _ fs.Shutdowner      = (*Fs)(nil)
    _ fs.PublicLinker    = (*Fs)(nil)
    _ fs.PutUncheckeder  = (*Fs)(nil)
    _ fs.MergeDirser     = (*Fs)(nil)
    _ fs.CleanUpper      = (*Fs)(nil)
    _ fs.OpenWriterAter  = (*Fs)(nil)
    _ fs.OpenChunkWriter = (*Fs)(nil)
    _ fs.UserInfoer      = (*Fs)(nil)
    _ fs.Disconnecter    = (*Fs)(nil)
    // FIXME _ fs.FullObject = (*Object)(nil)
)
````
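Going by the `Help:` text above, the backend can be pointed at an archive either through the `remote` option or the on-the-fly `:archive:remote:path` form. A hedged usage sketch (the remote name and paths are hypothetical):

````console
# List the contents of an archive on an existing remote, on the fly
rclone ls :archive:myremote:path/to/files.zip

# Or configure a wrapping remote first, then read through it
rclone config create myarchive archive remote=myremote:path
rclone ls myarchive:
````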
backend/archive/archive_internal_test.go (new file, 221 lines)
````go
//go:build !plan9

package archive

import (
    "bytes"
    "context"
    "fmt"
    "os"
    "os/exec"
    "path"
    "path/filepath"
    "strconv"
    "strings"
    "testing"

    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/cache"
    "github.com/rclone/rclone/fs/filter"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// FIXME need to test Open with seek

// run - run a shell command
func run(t *testing.T, args ...string) {
    cmd := exec.Command(args[0], args[1:]...)
    fs.Debugf(nil, "run args = %v", args)
    out, err := cmd.CombinedOutput()
    if err != nil {
        t.Fatalf(`
----------------------------
Failed to run %v: %v
Command output was:
%s
----------------------------
`, args, err, out)
    }
}

// check the dst and src are identical
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
    t.Run(name, func(t *testing.T) {
        fs.Debugf(nil, "check %q vs %q", dstArchive, src)
        Farchive, err := cache.Get(ctx, dstArchive)
        if err != fs.ErrorIsFile {
            require.NoError(t, err)
        }
        Fsrc, err := cache.Get(ctx, src)
        if err != fs.ErrorIsFile {
            require.NoError(t, err)
        }

        var matches bytes.Buffer
        opt := operations.CheckOpt{
            Fdst:  Farchive,
            Fsrc:  Fsrc,
            Match: &matches,
        }

        for _, action := range []string{"Check", "Download"} {
            t.Run(action, func(t *testing.T) {
                matches.Reset()
                if action == "Download" {
                    assert.NoError(t, operations.CheckDownload(ctx, &opt))
                } else {
                    assert.NoError(t, operations.Check(ctx, &opt))
                }
                if expectedCount > 0 {
                    assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
                }
            })
        }

        t.Run("NewObject", func(t *testing.T) {
            // Check we can run NewObject on all files and read them
            assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
                if t.Failed() {
                    return
                }
                remote := srcObj.Remote()
                archiveObj, err := Farchive.NewObject(ctx, remote)
                require.NoError(t, err, remote)
                assert.Equal(t, remote, archiveObj.Remote(), remote)

                // Test that the contents are the same
                archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
                srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
                assert.Equal(t, srcBuf, archiveBuf)

                if len(srcBuf) < 81 {
                    return
                }

                // Tests that Open works with SeekOption
                assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")

                // Tests that Open works with RangeOption
                for _, test := range []struct {
                    ro                 fs.RangeOption
                    wantStart, wantEnd int
                }{
                    {fs.RangeOption{Start: 5, End: 15}, 5, 16},
                    {fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
                    {fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
                    {fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
                    // {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
                } {
                    got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
                    foundAt := strings.Index(srcBuf, got)
                    help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
                    assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
                }

                // Test that the modtimes are correct
                fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())

                // Test that the sizes are correct
                assert.Equal(t, srcObj.Size(), archiveObj.Size())

                // Test that Strings are OK
                assert.Equal(t, srcObj.String(), archiveObj.String())
            }))
        })

        // t.Logf("Fdst ------------- %v", Fdst)
        // operations.List(ctx, Fdst, os.Stdout)
        // t.Logf("Fsrc ------------- %v", Fsrc)
        // operations.List(ctx, Fsrc, os.Stdout)
    })
}

// test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
    ctx := context.Background()
    checkFiles := 1000

    // create random test input files
    inputRoot := t.TempDir()
    input := filepath.Join(inputRoot, archiveName)
    require.NoError(t, os.Mkdir(input, 0777))
    run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)

    // Create the archive
    output := t.TempDir()
    zipFile := path.Join(output, archiveName)
    archiveFn(t, zipFile, input)

    // Check the archive itself
    checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)

    // Now check a subdirectory
    fis, err := os.ReadDir(input)
    require.NoError(t, err)
    subDir := "NOT FOUND"
    aFile := "NOT FOUND"
    for _, fi := range fis {
        if fi.IsDir() {
            subDir = fi.Name()
        } else {
            aFile = fi.Name()
        }
    }
    checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)

    // Now check a single file
    fiCtx, fi := filter.AddConfig(ctx)
    require.NoError(t, fi.AddRule("+ "+aFile))
    require.NoError(t, fi.AddRule("- *"))
    checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)

    // Now check the level above
    checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
    // run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}

// Make sure we have the executable named
func skipIfNoExe(t *testing.T, exeName string) {
    _, err := exec.LookPath(exeName)
    if err != nil {
        t.Skipf("%s executable not installed", exeName)
    }
}

// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
    fstest.Initialise()
    skipIfNoExe(t, "zip")
    skipIfNoExe(t, "rclone")
    testArchive(t, "test.zip", func(t *testing.T, output, input string) {
        oldcwd, err := os.Getwd()
        require.NoError(t, err)
        require.NoError(t, os.Chdir(input))
        defer func() {
            require.NoError(t, os.Chdir(oldcwd))
        }()
        run(t, "zip", "-9r", output, ".")
    })
}

// Test creating and reading back some archives
//
// Note that this uses rclone and squashfs as external binaries.
func TestArchiveSquashfs(t *testing.T) {
    fstest.Initialise()
    skipIfNoExe(t, "mksquashfs")
    skipIfNoExe(t, "rclone")
    testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
        run(t, "mksquashfs", input, output)
    })
}
````
67 backend/archive/archive_test.go Normal file
@@ -0,0 +1,67 @@
//go:build !plan9

// Test Archive filesystem interface
package archive_test

import (
	"testing"

	_ "github.com/rclone/rclone/backend/local"
	_ "github.com/rclone/rclone/backend/memory"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/fstests"
)

var (
	unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
	// In these tests we receive objects from the underlying remote which don't implement these methods
	unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	if *fstest.RemoteName == "" {
		t.Skip("Skipping as -remote not set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName:                   *fstest.RemoteName,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestLocal(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	remote := t.TempDir()
	name := "TestArchiveLocal"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "archive"},
			{Name: name, Key: "remote", Value: remote},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}

func TestMemory(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("Skipping as -remote set")
	}
	remote := ":memory:"
	name := "TestArchiveMemory"
	fstests.Run(t, &fstests.Opt{
		RemoteName: name + ":",
		ExtraConfig: []fstests.ExtraConfigItem{
			{Name: name, Key: "type", Value: "archive"},
			{Name: name, Key: "remote", Value: remote},
		},
		QuickTestOK:                  true,
		UnimplementableFsMethods:     unimplementableFsMethods,
		UnimplementableObjectMethods: unimplementableObjectMethods,
	})
}
7 backend/archive/archive_unsupported.go Normal file
@@ -0,0 +1,7 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files"

//go:build plan9

// Package archive implements a backend to access archive files in a remote
package archive
24 backend/archive/archiver/archiver.go Normal file
@@ -0,0 +1,24 @@
// Package archiver registers all the archivers
package archiver

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// Archiver describes an archive package
type Archiver struct {
	// New constructs an Fs from the (wrappedFs, remote) with the objects
	// prefix with prefix and rooted at root
	New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
	Extension string
}

// Archivers is a slice of all registered archivers
var Archivers []Archiver

// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
	Archivers = append(Archivers, as...)
}
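The zip and squashfs packages later in this change register themselves with this registry from their init functions. As a minimal sketch of the same pattern, a further archiver (the tar package name and New body here are hypothetical, not part of this change) would plug in like this:

// Package tar is a hypothetical archiver, shown only to illustrate registration.
package tar

import (
	"context"

	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
)

func init() {
	archiver.Register(archiver.Archiver{
		New:       New,    // constructor called when an archive matches
		Extension: ".tar", // archives with this suffix are routed here
	})
}

// New would construct an fs.Fs reading the tar archive at remote on wrappedFs.
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	return nil, fs.ErrorNotImplemented // placeholder body for the sketch
}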
233 backend/archive/base/base.go Normal file
@@ -0,0 +1,233 @@
// Package base is a base archive Fs
package base

import (
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/vfs"
)

// Fs represents a wrapped fs.Fs
type Fs struct {
	f           fs.Fs
	wrapper     fs.Fs
	name        string
	features    *fs.Features // optional features
	vfs         *vfs.VFS
	node        vfs.Node // archive object
	remote      string   // remote of the archive object
	prefix      string   // position for objects
	prefixSlash string   // position for objects with a slash on
	root        string   // position to read from within the archive
}

var errNotImplemented = errors.New("internal error: method not implemented in archiver")

// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
	// FIXME vfs cache?
	// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
	fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
	VFS := vfs.New(wrappedFs, nil)
	node, err := VFS.Stat(remote)
	if err != nil {
		return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
	}

	f := &Fs{
		f:           wrappedFs,
		name:        path.Join(fs.ConfigString(wrappedFs), remote),
		vfs:         VFS,
		node:        node,
		remote:      remote,
		root:        root,
		prefix:      prefix,
		prefixSlash: prefix + "/",
	}

	// FIXME
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	//
	// FIXME some of these need to be forced on - CanHaveEmptyDirectories
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            false, // MimeTypes not supported with gzip
		WriteMimeType:           false,
		BucketBased:             false,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	return f, nil
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return f.name
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	return nil, errNotImplemented
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
	return nil, errNotImplemented
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
	return nil, vfs.EROFS
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// Object describes an object to be read from the raw zip file
type Object struct {
	f      *Fs
	remote string
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.f
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return -1
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return time.Now()
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return vfs.EROFS
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	return nil, errNotImplemented
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return vfs.EROFS
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return vfs.EROFS
}

// Check the interfaces are satisfied
var (
	_ fs.Fs        = (*Fs)(nil)
	_ fs.UnWrapper = (*Fs)(nil)
	_ fs.Wrapper   = (*Fs)(nil)
	_ fs.Object    = (*Object)(nil)
)
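Nothing in this change embeds base.Fs yet, but its read-only stubs are the obvious reuse point for a future archiver. A hedged sketch (the example package is hypothetical): embedding *base.Fs inherits Mkdir, Put, Update and friends returning vfs.EROFS, so only the read path needs overriding.

// Package example is a hypothetical archiver illustrating reuse of base.Fs.
package example

import (
	"context"

	"github.com/rclone/rclone/backend/archive/base"
	"github.com/rclone/rclone/fs"
)

// Fs embeds *base.Fs so the read-only stubs are inherited; a real
// archiver would override List, NewObject and Object.Open.
type Fs struct {
	*base.Fs
}

// New wraps base.New and returns the embedding Fs.
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	b, err := base.New(ctx, wrappedFs, remote, prefix, root)
	if err != nil {
		return nil, err
	}
	return &Fs{Fs: b}, nil
}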
165 backend/archive/squashfs/cache.go Normal file
@@ -0,0 +1,165 @@
package squashfs

// Could just be using bare object Open with RangeRequest which
// would transfer the minimum amount of data but may be slower.

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"sync"

	"github.com/diskfs/go-diskfs/backend"
	"github.com/rclone/rclone/vfs"
)

// Cache file handles for accessing the file
type cache struct {
	node  vfs.Node
	fhsMu sync.Mutex
	fhs   []cacheHandle
}

// A cached file handle
type cacheHandle struct {
	offset int64
	fh     vfs.Handle
}

// Make a new cache
func newCache(node vfs.Node) *cache {
	return &cache{
		node: node,
	}
}

// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
	c.fhsMu.Lock()
	defer c.fhsMu.Unlock()

	if len(c.fhs) > 0 {
		// Look for exact match first
		for i, cfh := range c.fhs {
			if cfh.offset == off {
				// fs.Debugf(nil, "CACHE MATCH")
				c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
				return cfh.fh, nil
			}
		}
		// fs.Debugf(nil, "CACHE MISS")
		// Just take the first one if not found
		cfh := c.fhs[0]
		c.fhs = c.fhs[1:]
		return cfh.fh, nil
	}

	fh, err = c.node.Open(os.O_RDONLY)
	if err != nil {
		return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
	}

	return fh, nil
}

// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
func (c *cache) close(fh vfs.Handle, off int64) {
	c.fhsMu.Lock()
	defer c.fhsMu.Unlock()

	c.fhs = append(c.fhs, cacheHandle{
		offset: off,
		fh:     fh,
	})
}

// ReadAt reads len(p) bytes into p starting at offset off in the underlying
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
// error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input
// source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset, ReadAt should
// not affect nor be affected by the underlying seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the same input
// source.
//
// Implementations must not retain p.
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
	fh, err := c.open(off)
	if err != nil {
		return n, err
	}
	defer func() {
		c.close(fh, off+int64(len(p)))
	}()
	// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
	return fh.ReadAt(p, off)
}

var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")

// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
	return 0, errCacheNotImplemented
}

// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
	return 0, errCacheNotImplemented
}

// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
	return 0, errCacheNotImplemented
}

func (c *cache) Stat() (fs.FileInfo, error) {
	return nil, errCacheNotImplemented
}

// Close the file
func (c *cache) Close() (err error) {
	c.fhsMu.Lock()
	defer c.fhsMu.Unlock()

	// Close any open file handles
	for i := range c.fhs {
		fh := &c.fhs[i]
		newErr := fh.fh.Close()
		if err == nil {
			err = newErr
		}
	}
	c.fhs = nil
	return err
}

// Sys returns OS-specific file for ioctl calls via fd
func (c *cache) Sys() (*os.File, error) {
	return nil, errCacheNotImplemented
}

// Writable returns file for read-write operations
func (c *cache) Writable() (backend.WritableFile, error) {
	return nil, errCacheNotImplemented
}

// check interfaces
var _ backend.Storage = (*cache)(nil)
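A hedged usage sketch of this handle pool (illustrative helper, assuming io, sync and the rclone fs package are imported): concurrent ReadAt calls each borrow a handle positioned at their offset where possible, and return it keyed by off+len(p) so a sequential follow-on read finds a handle that needs no seek.

// readBlocks is a hypothetical helper, not part of this change.
func readBlocks(c *cache, offs []int64) {
	var wg sync.WaitGroup
	for _, off := range offs {
		wg.Add(1)
		go func(off int64) {
			defer wg.Done()
			buf := make([]byte, 4096)
			// Parallel ReadAt calls are safe: each borrows its own handle
			if _, err := c.ReadAt(buf, off); err != nil && err != io.EOF {
				fs.Errorf(nil, "read at %d failed: %v", off, err)
			}
		}(off)
	}
	wg.Wait()
	_ = c.Close() // closes any pooled handles
}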
446 backend/archive/squashfs/squashfs.go Normal file
@@ -0,0 +1,446 @@
// Package squashfs implements a squashfs archiver for the archive backend
package squashfs

import (
	"context"
	"fmt"
	"io"
	"path"
	"strings"
	"time"

	"github.com/diskfs/go-diskfs/filesystem/squashfs"
	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
)

func init() {
	archiver.Register(archiver.Archiver{
		New:       New,
		Extension: ".sqfs",
	})
}

// Fs represents a wrapped fs.Fs
type Fs struct {
	f           fs.Fs
	wrapper     fs.Fs
	name        string
	features    *fs.Features // optional features
	vfs         *vfs.VFS
	sqfs        *squashfs.FileSystem // interface to the squashfs
	c           *cache
	node        vfs.Node // squashfs file object - set if reading
	remote      string   // remote of the squashfs file object
	prefix      string   // position for objects
	prefixSlash string   // position for objects with a slash on
	root        string   // position to read from within the archive
}

// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	// FIXME vfs cache?
	// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
	fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
	vfsOpt := vfscommon.Opt
	vfsOpt.ReadWait = 0
	VFS := vfs.New(wrappedFs, &vfsOpt)
	node, err := VFS.Stat(remote)
	if err != nil {
		return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
	}

	c := newCache(node)

	// FIXME blocksize
	sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
	if err != nil {
		return nil, fmt.Errorf("failed to read squashfs: %w", err)
	}

	f := &Fs{
		f:           wrappedFs,
		name:        path.Join(fs.ConfigString(wrappedFs), remote),
		vfs:         VFS,
		node:        node,
		sqfs:        sqfs,
		c:           c,
		remote:      remote,
		root:        strings.Trim(root, "/"),
		prefix:      prefix,
		prefixSlash: prefix + "/",
	}
	if prefix == "" {
		f.prefixSlash = ""
	}

	singleObject := false

	// Find the directory the root points to
	if f.root != "" && !strings.HasSuffix(root, "/") {
		native, err := f.toNative("")
		if err == nil {
			native = strings.TrimRight(native, "/")
			_, err := f.newObjectNative(native)
			if err == nil {
				// If it pointed to a file, find the directory above
				f.root = path.Dir(f.root)
				if f.root == "." || f.root == "/" {
					f.root = ""
				}
			}
		}
	}

	// FIXME
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	//
	// FIXME some of these need to be forced on - CanHaveEmptyDirectories
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            false, // MimeTypes not supported with squashfs
		WriteMimeType:           false,
		BucketBased:             false,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	if singleObject {
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("Squashfs %q", f.name)
}

// This turns a remote into a native path in the squashfs starting with a /
func (f *Fs) toNative(remote string) (string, error) {
	native := strings.Trim(remote, "/")
	if f.prefix == "" {
		native = "/" + native
	} else if native == f.prefix {
		native = "/"
	} else if !strings.HasPrefix(native, f.prefixSlash) {
		return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
	} else {
		native = native[len(f.prefix):]
	}
	if f.root != "" {
		native = "/" + f.root + native
	}
	return native, nil
}

// Turn a (nativeDir, leaf) into a remote
func (f *Fs) fromNative(nativeDir string, leaf string) string {
	// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
	dir := nativeDir
	if f.root != "" {
		dir = strings.TrimPrefix(dir, "/"+f.root)
	}
	remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
	// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
	return remote
}
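// Worked examples of the mapping above (illustrative notes derived from the
// code, not part of the original change):
//
//	prefix=""  root=""  : remote "a/b" -> native "/a/b"
//	prefix="z" root=""  : remote "z/a" -> native "/a"
//	prefix=""  root="r" : remote "a"   -> native "/r/a"
//
// and fromNative reverses this, stripping "/"+root and re-attaching the
// prefix, so List and NewObject round-trip paths consistently.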
// Convert a FileInfo into an Object from native dir
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
	return &Object{
		fs:      f,
		remote:  f.fromNative(nativeDir, item.Name()),
		size:    item.Size(),
		modTime: item.ModTime(),
		item:    item,
	}
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)

	nativeDir, err := f.toNative(dir)
	if err != nil {
		return nil, err
	}

	items, err := f.sqfs.ReadDir(nativeDir)
	if err != nil {
		return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
	}

	entries = make(fs.DirEntries, 0, len(items))
	for _, fi := range items {
		item, ok := fi.(squashfs.FileStat)
		if !ok {
			return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
		}
		// fs.Debugf(item.Name(), "entry = %#v", item)
		var entry fs.DirEntry
		if err != nil {
			return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
		}
		if item.IsDir() {
			var remote = f.fromNative(nativeDir, item.Name())
			entry = fs.NewDir(remote, item.ModTime())
		} else {
			if item.Mode().IsRegular() {
				entry = f.objectFromFileInfo(nativeDir, item)
			} else {
				fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
				continue
			}
		}
		entries = append(entries, entry)
	}

	// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
	return entries, nil
}

// newObjectNative finds the object at the native path passed in
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
	// get the path and filename
	dir, leaf := path.Split(nativePath)
	dir = strings.TrimRight(dir, "/")
	leaf = strings.Trim(leaf, "/")

	// FIXME need to detect directory not found
	fis, err := f.sqfs.ReadDir(dir)
	if err != nil {
		return nil, fs.ErrorObjectNotFound
	}

	for _, fi := range fis {
		if fi.Name() == leaf {
			if fi.IsDir() {
				return nil, fs.ErrorNotAFile
			}
			item, ok := fi.(squashfs.FileStat)
			if !ok {
				return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
			}
			o = f.objectFromFileInfo(dir, item)
			break
		}
	}
	if o == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return o, nil
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
	defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)

	nativePath, err := f.toNative(remote)
	if err != nil {
		return nil, err
	}
	return f.newObjectNative(nativePath)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
	return nil, vfs.EROFS
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// Object describes an object to be read from the raw squashfs file
type Object struct {
	fs      *Fs
	remote  string
	size    int64
	modTime time.Time
	item    squashfs.FileStat
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Turn a squashfs path into a full path for the parent Fs
// func (o *Object) path(remote string) string {
// 	return path.Join(o.fs.prefix, remote)
// }

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return vfs.EROFS
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	remote, err := o.fs.toNative(o.remote)
	if err != nil {
		return nil, err
	}

	fs.Debugf(o, "Opening %q", remote)
	//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
	fh, err := o.item.Open()
	if err != nil {
		return nil, err
	}

	// discard data from start as necessary
	if offset > 0 {
		_, err = fh.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}
	// If limited then don't return everything
	if limit >= 0 {
		fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
		return readers.NewLimitedReadCloser(fh, limit), nil
	}

	return fh, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return vfs.EROFS
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return vfs.EROFS
}

// Check the interfaces are satisfied
var (
	_ fs.Fs        = (*Fs)(nil)
	_ fs.UnWrapper = (*Fs)(nil)
	_ fs.Wrapper   = (*Fs)(nil)
	_ fs.Object    = (*Object)(nil)
)
385 backend/archive/zip/zip.go Normal file
@@ -0,0 +1,385 @@
// Package zip implements a zip archiver for the archive backend
package zip

import (
	"archive/zip"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"strings"
	"time"

	"github.com/rclone/rclone/backend/archive/archiver"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/dirtree"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/vfs"
	"github.com/rclone/rclone/vfs/vfscommon"
)

func init() {
	archiver.Register(archiver.Archiver{
		New:       New,
		Extension: ".zip",
	})
}

// Fs represents a wrapped fs.Fs
type Fs struct {
	f           fs.Fs
	wrapper     fs.Fs
	name        string
	features    *fs.Features // optional features
	vfs         *vfs.VFS
	node        vfs.Node // zip file object - set if reading
	remote      string   // remote of the zip file object
	prefix      string   // position for objects
	prefixSlash string   // position for objects with a slash on
	root        string   // position to read from within the archive
	dt          dirtree.DirTree // read from zipfile
}

// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	// FIXME vfs cache?
	// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
	fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
	vfsOpt := vfscommon.Opt
	vfsOpt.ReadWait = 0
	VFS := vfs.New(wrappedFs, &vfsOpt)
	node, err := VFS.Stat(remote)
	if err != nil {
		return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
	}

	f := &Fs{
		f:           wrappedFs,
		name:        path.Join(fs.ConfigString(wrappedFs), remote),
		vfs:         VFS,
		node:        node,
		remote:      remote,
		root:        root,
		prefix:      prefix,
		prefixSlash: prefix + "/",
	}

	// Read the contents of the zip file
	singleObject, err := f.readZip()
	if err != nil {
		return nil, fmt.Errorf("failed to open zip file: %w", err)
	}

	// FIXME
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	//
	// FIXME some of these need to be forced on - CanHaveEmptyDirectories
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            false, // MimeTypes not supported with gzip
		WriteMimeType:           false,
		BucketBased:             false,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	if singleObject {
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("Zip %q", f.name)
}

// readZip the zip file into f
//
// Returns singleObject=true if f.root points to a file
func (f *Fs) readZip() (singleObject bool, err error) {
	if f.node == nil {
		return singleObject, fs.ErrorDirNotFound
	}
	size := f.node.Size()
	if size < 0 {
		return singleObject, errors.New("can't read from zip file with unknown size")
	}
	r, err := f.node.Open(os.O_RDONLY)
	if err != nil {
		return singleObject, fmt.Errorf("failed to open zip file: %w", err)
	}
	zr, err := zip.NewReader(r, size)
	if err != nil {
		return singleObject, fmt.Errorf("failed to read zip file: %w", err)
	}
	dt := dirtree.New()
	for _, file := range zr.File {
		remote := strings.Trim(path.Clean(file.Name), "/")
		if remote == "." {
			remote = ""
		}
		remote = path.Join(f.prefix, remote)
		if f.root != "" {
			// Ignore all files outside the root
			if !strings.HasPrefix(remote, f.root) {
				continue
			}
			if remote == f.root {
				remote = ""
			} else {
				remote = strings.TrimPrefix(remote, f.root+"/")
			}
		}
		if strings.HasSuffix(file.Name, "/") {
			dir := fs.NewDir(remote, file.Modified)
			dt.AddDir(dir)
		} else {
			if remote == "" {
				remote = path.Base(f.root)
				singleObject = true
				dt = dirtree.New()
			}
			o := &Object{
				f:      f,
				remote: remote,
				fh:     &file.FileHeader,
				file:   file,
			}
			dt.Add(o)
			if singleObject {
				break
			}
		}
	}
	dt.CheckParents("")
	dt.Sort()
	f.dt = dt
	//fs.Debugf(nil, "dt = %v", dt)
	return singleObject, nil
}
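// Illustrative note (not part of the original change): if f.root is
// "dir/file.txt", the zip entry "dir/file.txt" trims to remote == "", so it
// is renamed to path.Base(f.root) ("file.txt"), singleObject is set, and the
// dirtree is reset to hold just that one object - this is what makes New
// return fs.ErrorIsFile when the root points at a file inside the archive.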
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
	// _, err = f.strip(dir)
	// if err != nil {
	// 	return nil, err
	// }
	entries, ok := f.dt[dir]
	if !ok {
		return nil, fs.ErrorDirNotFound
	}
	fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
	return entries, nil
}

// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
	defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
	if f.dt == nil {
		return nil, fs.ErrorObjectNotFound
	}
	_, entry := f.dt.Find(remote)
	if entry == nil {
		return nil, fs.ErrorObjectNotFound
	}
	o, ok := entry.(*Object)
	if !ok {
		return nil, fs.ErrorNotAFile
	}
	return o, nil
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
	return nil, vfs.EROFS
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.CRC32)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}

// Object describes an object to be read from the raw zip file
type Object struct {
	f      *Fs
	remote string
	fh     *zip.FileHeader
	file   *zip.File
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.f
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return int64(o.fh.UncompressedSize64)
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.fh.Modified
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return vfs.EROFS
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	if ht == hash.CRC32 {
		// FIXME return empty CRC if writing
		if o.f.dt == nil {
			return "", nil
		}
		return fmt.Sprintf("%08x", o.fh.CRC32), nil
	}
	return "", hash.ErrUnsupported
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	rc, err = o.file.Open()
	if err != nil {
		return nil, err
	}

	// discard data from start as necessary
	if offset > 0 {
		_, err = io.CopyN(io.Discard, rc, offset)
		if err != nil {
			return nil, err
		}
	}
	// If limited then don't return everything
	if limit >= 0 {
		return readers.NewLimitedReadCloser(rc, limit), nil
	}

	return rc, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return vfs.EROFS
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return vfs.EROFS
}

// Check the interfaces are satisfied
var (
	_ fs.Fs        = (*Fs)(nil)
	_ fs.UnWrapper = (*Fs)(nil)
	_ fs.Wrapper   = (*Fs)(nil)
	_ fs.Object    = (*Object)(nil)
)
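Putting the pieces together, the tests earlier in this change exercise the backend through on-the-fly remotes such as ":archive:"+zipFile. A minimal sketch of the same read path driven from Go (a hedged example: it assumes the archive backend is compiled in, e.g. via the backend/all import, and the zip path and member name are hypothetical):

package main

import (
	"context"
	"fmt"
	"io"

	_ "github.com/rclone/rclone/backend/all" // registers backends, including archive
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()
	// Open the zip as a filesystem via an on-the-fly remote
	f, err := fs.NewFs(ctx, ":archive:/tmp/test.zip")
	if err != nil {
		panic(err)
	}
	o, err := f.NewObject(ctx, "file.txt") // hypothetical member name
	if err != nil {
		panic(err)
	}
	rc, err := o.Open(ctx)
	if err != nil {
		panic(err)
	}
	defer func() { _ = rc.Close() }()
	data, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}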
@@ -56,6 +56,7 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/readers"
@@ -843,15 +844,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 //
 // This should return ErrDirNotFound if the directory isn't found.
 func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
-	var entries fs.DirEntries
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
 	subDirClient := f.dirClient(dir)

 	// Checking whether directory exists
 	_, err := subDirClient.GetProperties(ctx, nil)
 	if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
-		return entries, fs.ErrorDirNotFound
+		return fs.ErrorDirNotFound
 	} else if err != nil {
-		return entries, err
+		return err
 	}

 	opt := &directory.ListFilesAndDirectoriesOptions{
@@ -863,7 +881,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 	for pager.More() {
 		resp, err := pager.NextPage(ctx)
 		if err != nil {
-			return entries, err
+			return err
 		}
 		for _, directory := range resp.Segment.Directories {
 			// Name *string `xml:"Name"`
@@ -889,7 +907,10 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 			if directory.Properties.ContentLength != nil {
 				entry.SetSize(*directory.Properties.ContentLength)
 			}
-			entries = append(entries, entry)
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
 		}
 		for _, file := range resp.Segment.Files {
 			leaf := f.opt.Enc.ToStandardPath(*file.Name)
@@ -903,10 +924,13 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 			if file.Properties.LastWriteTime != nil {
 				entry.modTime = *file.Properties.LastWriteTime
 			}
-			entries = append(entries, entry)
+			err = list.Add(entry)
+			if err != nil {
+				return err
+			}
 		}
 	}
-	return entries, nil
+	return list.Flush()
 }

 // ------------------------------------------------------------
@@ -1450,6 +1474,7 @@ var (
 	_ fs.DirMover       = &Fs{}
 	_ fs.Copier         = &Fs{}
 	_ fs.OpenWriterAter = &Fs{}
+	_ fs.ListPer        = &Fs{}
 	_ fs.Object         = &Object{}
 	_ fs.MimeTyper      = &Object{}
 )
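For callers, the new ListP streams entries in tranches instead of accumulating one big slice. A minimal sketch of a consumer (assuming f is an fs.Fs implementing fs.ListPer, as registered above):

// Stream a directory listing without holding all entries in memory.
err := f.ListP(ctx, "some/dir", func(entries fs.DirEntries) error {
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
	return nil // returning an error here stops the listing early
})
if err != nil {
	// handle fs.ErrorDirNotFound etc.
}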
@@ -48,6 +48,14 @@ type LifecycleRule struct {
 	FileNamePrefix string `json:"fileNamePrefix"`
 }

+// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
+type ServerSideEncryption struct {
+	Mode           string `json:"mode"`
+	Algorithm      string `json:"algorithm"`      // Encryption algorithm to use
+	CustomerKey    string `json:"customerKey"`    // User provided Base64 encoded key that is used by the server to encrypt files
+	CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
+}
+
 // Timestamp is a UTC time when this file was uploaded. It is a base
 // 10 number of milliseconds since midnight, January 1, 1970 UTC. This
 // fits in a 64 bit integer such as the type "long" in the programming
@@ -261,21 +269,22 @@ type GetFileInfoRequest struct {
 //
 // Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
 type StartLargeFileRequest struct {
-	BucketID    string            `json:"bucketId"`    //The ID of the bucket that the file will go in.
-	Name        string            `json:"fileName"`    // The name of the file. See Files for requirements on file names.
-	ContentType string            `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
-	Info        map[string]string `json:"fileInfo"`    // A JSON object holding the name/value pairs for the custom file info.
+	BucketID             string                `json:"bucketId"`    // The ID of the bucket that the file will go in.
+	Name                 string                `json:"fileName"`    // The name of the file. See Files for requirements on file names.
+	ContentType          string                `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
+	Info                 map[string]string     `json:"fileInfo"`    // A JSON object holding the name/value pairs for the custom file info.
+	ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
 }

 // StartLargeFileResponse is the response to StartLargeFileRequest
 type StartLargeFileResponse struct {
-	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
-	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
-	AccountID       string            `json:"accountId"`       // The identifier for the account.
-	BucketID        string            `json:"bucketId"`        // The unique ID of the bucket.
-	ContentType     string            `json:"contentType"`     // The MIME type of the file.
-	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
-	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
+	ID              string            `json:"fileId"`                    // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
+	Name            string            `json:"fileName"`                  // The name of this file, which can be used with b2_download_file_by_name.
+	AccountID       string            `json:"accountId"`                 // The identifier for the account.
+	BucketID        string            `json:"bucketId"`                  // The unique ID of the bucket.
+	ContentType     string            `json:"contentType"`               // The MIME type of the file.
+	Info            map[string]string `json:"fileInfo"`                  // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
+	UploadTimestamp Timestamp         `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
 }

 // GetUploadPartURLRequest is passed to b2_get_upload_part_url
@@ -325,21 +334,25 @@ type CancelLargeFileResponse struct {

 // CopyFileRequest is as passed to b2_copy_file
 type CopyFileRequest struct {
-	SourceID          string            `json:"sourceFileId"`                  // The ID of the source file being copied.
-	Name              string            `json:"fileName"`                      // The name of the new file being created.
-	Range             string            `json:"range,omitempty"`               // The range of bytes to copy. If not provided, the whole source file will be copied.
-	MetadataDirective string            `json:"metadataDirective,omitempty"`   // The strategy for how to populate metadata for the new file: COPY or REPLACE
-	ContentType       string            `json:"contentType,omitempty"`         // The MIME type of the content of the file (REPLACE only)
-	Info              map[string]string `json:"fileInfo,omitempty"`            // This field stores the metadata that will be stored with the file. (REPLACE only)
-	DestBucketID      string            `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
+	SourceID                        string                `json:"sourceFileId"`                              // The ID of the source file being copied.
+	Name                            string                `json:"fileName"`                                  // The name of the new file being created.
+	Range                           string                `json:"range,omitempty"`                           // The range of bytes to copy. If not provided, the whole source file will be copied.
+	MetadataDirective               string                `json:"metadataDirective,omitempty"`               // The strategy for how to populate metadata for the new file: COPY or REPLACE
+	ContentType                     string                `json:"contentType,omitempty"`                     // The MIME type of the content of the file (REPLACE only)
+	Info                            map[string]string     `json:"fileInfo,omitempty"`                        // This field stores the metadata that will be stored with the file. (REPLACE only)
+	DestBucketID                    string                `json:"destinationBucketId,omitempty"`             // The destination ID of the bucket if set, if not the source bucket will be used
+	SourceServerSideEncryption      *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"`      // A JSON object holding values related to Server-Side Encryption for the source file
+	DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
 }

 // CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
 type CopyPartRequest struct {
-	SourceID    string `json:"sourceFileId"`    // The ID of the source file being copied.
-	LargeFileID string `json:"largeFileId"`     // The ID of the large file the part will belong to, as returned by b2_start_large_file.
-	PartNumber  int64  `json:"partNumber"`      // Which part this is (starting from 1)
-	Range       string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
+	SourceID                        string                `json:"sourceFileId"`                              // The ID of the source file being copied.
+	LargeFileID                     string                `json:"largeFileId"`                               // The ID of the large file the part will belong to, as returned by b2_start_large_file.
+	PartNumber                      int64                 `json:"partNumber"`                                // Which part this is (starting from 1)
+	Range                           string                `json:"range,omitempty"`                           // The range of bytes to copy. If not provided, the whole source file will be copied.
+	SourceServerSideEncryption      *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"`      // A JSON object holding values related to Server-Side Encryption for the source file
+	DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
 }

 // UpdateBucketRequest describes a request to modify a B2 bucket
171 backend/b2/b2.go
@@ -8,7 +8,9 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -53,6 +55,9 @@ const (
|
||||
nameHeader = "X-Bz-File-Name"
|
||||
timestampHeader = "X-Bz-Upload-Timestamp"
|
||||
retryAfterHeader = "Retry-After"
|
||||
sseAlgorithmHeader = "X-Bz-Server-Side-Encryption-Customer-Algorithm"
|
||||
sseKeyHeader = "X-Bz-Server-Side-Encryption-Customer-Key"
|
||||
sseMd5Header = "X-Bz-Server-Side-Encryption-Customer-Key-Md5"
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 5 * time.Minute
|
||||
decayConstant = 1 // bigger for slower decay, exponential
|
||||
@@ -67,7 +72,7 @@ const (
|
||||
|
||||
// Globals
|
||||
var (
|
||||
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
|
||||
errNotWithVersions = errors.New("can't modify files in --b2-versions mode")
|
||||
errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
|
||||
)
|
||||
|
||||
@@ -252,6 +257,51 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}, {
|
||||
Name: "sse_customer_algorithm",
|
||||
Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.",
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}, {
|
||||
Value: "AES256",
|
||||
Help: "Advanced Encryption Standard (256 bits key length)",
|
||||
}},
|
||||
}, {
|
||||
Name: "sse_customer_key",
|
||||
Help: `To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
|
||||
|
||||
Alternatively you can provide --sse-customer-key-base64.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_base64",
|
||||
Help: `To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
|
||||
|
||||
Alternatively you can provide --sse-customer-key.`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_md5",
|
||||
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
|
||||
|
||||
If you leave it blank, this is calculated automatically from the sse_customer_key provided.
|
||||
`,
|
||||
Advanced: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}},
|
||||
})
|
||||
}
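The options above map directly onto B2's SSE-C headers defined near the top of this diff. A minimal standalone sketch of how the backend derives the Base64 key and its MD5 header value, matching the NewFs logic further down (the raw key here is a made-up example):

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// Hypothetical 32-byte raw key supplied via sse_customer_key.
	rawKey := []byte("0123456789abcdef0123456789abcdef")

	// Encode the raw key as Base64, as NewFs does when only
	// sse_customer_key is set.
	keyB64 := base64.StdEncoding.EncodeToString(rawKey)

	// Calculate the key's MD5 checksum and Base64-encode it, as NewFs
	// does when sse_customer_key_md5 is left blank.
	sum := md5.Sum(rawKey)
	md5B64 := base64.StdEncoding.EncodeToString(sum[:])

	fmt.Println("X-Bz-Server-Side-Encryption-Customer-Key:", keyB64)
	fmt.Println("X-Bz-Server-Side-Encryption-Customer-Key-Md5:", md5B64)
}
```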
@@ -274,6 +324,10 @@ type Options struct {
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Lifecycle int `config:"lifecycle"`
Enc encoder.MultiEncoder `config:"encoding"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyBase64 string `config:"sse_customer_key_base64"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
}

// Fs represents a remote b2 server
@@ -504,6 +558,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Endpoint == "" {
opt.Endpoint = defaultEndpoint
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyBase64 != "" {
return nil, errors.New("b2: can't use both sse_customer_key and sse_customer_key_base64 at the same time")
} else if opt.SSECustomerKeyBase64 != "" {
// Decode the Base64-encoded key and store it in the SSECustomerKey field
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
if err != nil {
return nil, fmt.Errorf("b2: Could not decode sse_customer_key_base64: %w", err)
}
opt.SSECustomerKey = string(decoded)
} else {
// Encode the raw key as Base64
opt.SSECustomerKeyBase64 = base64.StdEncoding.EncodeToString([]byte(opt.SSECustomerKey))
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
// Calculate CustomerKeyMd5 if not supplied
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
}
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
@@ -1435,6 +1507,16 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
Name: f.opt.Enc.FromStandardPath(dstPath),
DestBucketID: destBucketID,
}
if f.opt.SSECustomerKey != "" && f.opt.SSECustomerKeyMD5 != "" {
serverSideEncryptionConfig := api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: f.opt.SSECustomerAlgorithm,
CustomerKey: f.opt.SSECustomerKeyBase64,
CustomerKeyMd5: f.opt.SSECustomerKeyMD5,
}
request.SourceServerSideEncryption = &serverSideEncryptionConfig
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
}
if newInfo == nil {
request.MetadataDirective = "COPY"
} else {
@@ -1866,9 +1948,10 @@ var _ io.ReadCloser = &openFile{}

func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
opts := rest.Opts{
Method: method,
Options: options,
NoResponse: method == "HEAD",
Method: method,
Options: options,
NoResponse: method == "HEAD",
ExtraHeaders: map[string]string{},
}

// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1886,6 +1969,11 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
bucket, bucketPath := o.split()
opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
}
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
return o.fs.shouldRetry(ctx, resp, err)
@@ -2150,6 +2238,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
},
ContentLength: &size,
}
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
}
var response api.FileInfo
// Don't retry, return a retry error instead
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -2241,7 +2334,10 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
if o.fs.opt.Versions {
return errNotWithVersions
t, path := api.RemoveVersion(bucketPath)
if !t.IsZero() {
return o.fs.deleteByID(ctx, o.id, path)
}
}
if o.fs.opt.VersionAt.IsSet() {
return errNotWithVersionAt
@@ -2264,32 +2360,36 @@ func (o *Object) ID() string {

var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket",
Short: "Read or set the lifecycle for a bucket.",
Long: `This command can be used to read or set the lifecycle for a bucket.

Usage Examples:

To show the current lifecycle rules:

rclone backend lifecycle b2:bucket
` + "```console" + `
rclone backend lifecycle b2:bucket
` + "```" + `

This will dump something like this showing the lifecycle rules.

[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
` + "```json" + `
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
` + "```" + `

If there are no lifecycle rules (the default) then it will just return [].
If there are no lifecycle rules (the default) then it will just return ` + "`[]`" + `.

To reset the current lifecycle rules:

rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
` + "```console" + `
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
` + "```" + `

This will run and then print the new lifecycle rules as above.

@@ -2301,14 +2401,17 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config also which will mean deletions won't cause versions but
overwrites will still cause versions to be made.

rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
` + "```console" + `
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
` + "```" + `

See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
See: <https://www.backblaze.com/docs/cloud-storage-lifecycle-rules>`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
"daysFromHidingToDeleting": `After a file has been hidden for this many days
it is deleted. 0 is off.`,
"daysFromUploadingToHiding": `This many days after uploading a file is hidden.`,
"daysFromStartingToCancelingUnfinishedLargeFiles": `Cancels any unfinished
large file versions after this many days.`,
},
}

@@ -2391,13 +2494,14 @@ max-age, which defaults to 24 hours.
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
` + "```console" + `
rclone backend cleanup b2:bucket/path/to/object
rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
` + "```" + `

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
"max-age": "Max age of upload to delete.",
},
}

@@ -2420,8 +2524,9 @@ var cleanupHiddenHelp = fs.CommandHelp{
Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.

rclone backend cleanup-hidden b2:bucket/path/to/dir
`,
` + "```console" + `
rclone backend cleanup-hidden b2:bucket/path/to/dir
` + "```",
}

func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {

@@ -144,6 +144,14 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
request.ContentType = newInfo.ContentType
request.Info = newInfo.Info
}
if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
request.ServerSideEncryption = &api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: o.fs.opt.SSECustomerAlgorithm,
CustomerKey: o.fs.opt.SSECustomerKeyBase64,
CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
}
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
@@ -295,6 +303,12 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
ContentLength: &sizeWithHash,
}

if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
}

var response api.UploadPartResponse

resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
@@ -334,6 +348,17 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
PartNumber: int64(part + 1),
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}

if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
serverSideEncryptionConfig := api.ServerSideEncryption{
Mode: "SSE-C",
Algorithm: up.o.fs.opt.SSECustomerAlgorithm,
CustomerKey: up.o.fs.opt.SSECustomerKeyBase64,
CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
}
request.SourceServerSideEncryption = &serverSideEncryptionConfig
request.DestinationServerSideEncryption = &serverSideEncryptionConfig
}
var response api.UploadPartResponse
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)

@@ -37,6 +37,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -86,13 +87,11 @@ func init() {
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
var err error
// If using box config.json, use JWT auth
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if usesJWTAuth(m) {
err = refreshJWTToken(ctx, name, m)
if err != nil {
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
}
@@ -113,6 +112,11 @@
}, {
Name: "box_config_file",
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
}, {
Name: "config_credentials",
Help: "Box App config.json contents.\n\nLeave blank normally.",
Hide: fs.OptionHideBoth,
Sensitive: true,
}, {
Name: "access_token",
Help: "Box App Primary Access Token\n\nLeave blank normally.",
@@ -183,9 +187,17 @@ See: https://developer.box.com/guides/authentication/jwt/as-user/
})
}

func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
func usesJWTAuth(m configmap.Mapper) bool {
jsonFile, okFile := m.Get("box_config_file")
jsonFileCredentials, okCredentials := m.Get("config_credentials")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != ""
}

func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
boxSubType, _ := m.Get("box_sub_type")

boxConfig, err := getBoxConfig(m)
if err != nil {
return fmt.Errorf("get box config: %w", err)
}
@@ -204,12 +216,19 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
return err
}

func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
func getBoxConfig(m configmap.Mapper) (boxConfig *api.ConfigJSON, err error) {
configFileCredentials, _ := m.Get("config_credentials")
configFileBytes := []byte(configFileCredentials)

if configFileCredentials == "" {
configFile, _ := m.Get("box_config_file")
configFileBytes, err = os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}
}
err = json.Unmarshal(file, &boxConfig)

err = json.Unmarshal(configFileBytes, &boxConfig)
if err != nil {
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
}
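getBoxConfig now prefers inline JSON over a file path. A stripped-down sketch of that precedence, standalone and with hypothetical names, ignoring the configmap plumbing:

```go
package main

import (
	"fmt"
	"os"
)

// loadBoxConfig is a hypothetical standalone version of the precedence
// implemented above: inline config_credentials content wins; otherwise
// the file named by box_config_file is read from disk.
func loadBoxConfig(credentials, path string) ([]byte, error) {
	if credentials != "" {
		return []byte(credentials), nil
	}
	return os.ReadFile(path)
}

func main() {
	cfg, err := loadBoxConfig(`{"boxAppSettings":{}}`, "")
	fmt.Println(string(cfg), err)
}
```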
@@ -484,15 +503,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("as-user", f.opt.Impersonate)
}

jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")

if ts != nil {
// If using box config.json and JWT, renewing should just refresh the token and
// should do so whether there are uploads pending or not.
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
if usesJWTAuth(m) {
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
err := refreshJWTToken(ctx, name, m)
return err
})
f.tokenRenewer.Start()
@@ -705,9 +721,27 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
return err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
@@ -717,14 +751,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
entries = append(entries, d)
err = list.Add(d)
if err != nil {
iErr = err
return true
}
} else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
err = list.Add(o)
if err != nil {
iErr = err
return true
}
}

// Cache some metadata for this Item to help us process events later
@@ -740,12 +782,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return false
})
if err != nil {
return nil, err
return err
}
if iErr != nil {
return nil, iErr
return iErr
}
return entries, nil
return list.Flush()
}
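ListP streams entries to the caller in tranches instead of accumulating one big slice. A minimal sketch of consuming it through the fs.ListRCallback signature; this is a fragment, assuming an existing f (implementing fs.ListPer as added here) and ctx, with an illustrative directory path:

```go
// Hypothetical consumer: print each tranche of entries as it arrives.
err := f.ListP(ctx, "some/dir", func(entries fs.DirEntries) error {
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
	return nil // returning an error here stops the listing immediately
})
if err != nil {
	fmt.Println("listing failed:", err)
}
```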

// Creates from the parameters passed in a half finished Object which
@@ -1741,6 +1783,7 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)

1 backend/cache/utils_test.go vendored
@@ -1,5 +1,4 @@
//go:build !plan9 && !js
// +build !plan9,!js

package cache

@@ -2,10 +2,8 @@
package compress

import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/binary"
"encoding/hex"
@@ -46,6 +44,7 @@ const (
minCompressionRatio = 1.1

gzFileExt = ".gz"
zstdFileExt = ".zst"
metaFileExt = ".json"
uncompressedFileExt = ".bin"
)
@@ -54,6 +53,7 @@ const (
const (
Uncompressed = 0
Gzip = 2
Zstd = 4
)

var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
@@ -66,6 +66,10 @@ func init() {
Value: "gzip",
Help: "Standard gzip compression with fastest parameters.",
},
{
Value: "zstd",
Help: "Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.",
},
}

// Register our remote
@@ -87,17 +91,23 @@ func init() {
Examples: compressionModeOptions,
}, {
Name: "level",
Help: `GZIP compression level (-2 to 9).

Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.

Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
Advanced: true,
Help: `GZIP (levels -2 to 9):
- -2 — Huffman encoding only. Only use if you know what you're doing.
- -1 (default) — recommended; equivalent to level 5.
- 0 — turns off compression.
- 1–9 — increase compression at the cost of speed. Going past 6 generally offers very little return.

ZSTD (levels 0 to 4):
- 0 — turns off compression entirely.
- 1 — fastest compression with the lowest ratio.
- 2 (default) — good balance of speed and compression.
- 3 — better compression, but uses about 2–3x more CPU than the default.
- 4 — best possible compression ratio (highest CPU cost).

Notes:
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).`,
Required: true,
}, {
Name: "ram_cache_limit",
Help: `Some remotes don't allow the upload of files with unknown size.
@@ -112,6 +122,47 @@ this limit will be cached on disk.`,
})
}

// compressionModeHandler defines the interface for handling different compression modes
type compressionModeHandler interface {
// processFileNameGetFileExtension returns the file extension for the given compression mode
processFileNameGetFileExtension(compressionMode int) string

// newObjectGetOriginalSize returns the original file size from the metadata
newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
isCompressible(r io.Reader, compressionMode int) (bool, error)

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error)

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error)

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
}

// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
@@ -125,12 +176,13 @@
// Fs represents a wrapped fs.Fs
type Fs struct {
fs.Fs
wrapper fs.Fs
name string
root string
opt Options
mode int // compression mode id
features *fs.Features // optional features
wrapper fs.Fs
name string
root string
opt Options
mode int // compression mode id
features *fs.Features // optional features
modeHandler compressionModeHandler // compression mode handler
}

// NewFs constructs an Fs from the path, container:path
@@ -167,13 +219,28 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
}

compressionMode := compressionModeFromName(opt.CompressionMode)
var modeHandler compressionModeHandler

switch compressionMode {
case Gzip:
modeHandler = &gzipModeHandler{}
case Zstd:
modeHandler = &zstdModeHandler{}
case Uncompressed:
modeHandler = &uncompressedModeHandler{}
default:
modeHandler = &unknownModeHandler{}
}

// Create the wrapping fs
f := &Fs{
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
mode: compressionModeFromName(opt.CompressionMode),
Fs: wrappedFs,
name: name,
root: rpath,
opt: *opt,
mode: compressionMode,
modeHandler: modeHandler,
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
@@ -215,10 +282,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
return f, err
}

// compressionModeFromName converts a compression mode name to its int representation.
func compressionModeFromName(name string) int {
switch name {
case "gzip":
return Gzip
case "zstd":
return Zstd
default:
return Uncompressed
}
@@ -242,7 +312,7 @@ func base64ToInt64(str string) (int64, error) {

// Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
// Returns -2 for the original size if the file is uncompressed.
func processFileName(compressedFileName string) (origFileName string, extension string, origSize int64, err error) {
func processFileName(compressedFileName string, modeHandler compressionModeHandler) (origFileName string, extension string, origSize int64, err error) {
// Separate the filename and size from the extension
extensionPos := strings.LastIndex(compressedFileName, ".")
if extensionPos == -1 {
@@ -261,7 +331,8 @@ func processFileName(compressedFileName string) (origFileName string, extension
if err != nil {
return "", "", 0, errors.New("could not decode size")
}
return match[1], gzFileExt, size, nil
ext := modeHandler.processFileNameGetFileExtension(compressionModeFromName(compressedFileName[extensionPos+1:]))
return match[1], ext, size, nil
}

// Generates the file name for a metadata file
@@ -286,11 +357,15 @@ func unwrapMetadataFile(filename string) (string, bool) {

// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed {
switch mode {
case Gzip:
newRemote = remote + "." + int64ToBase64(size) + gzFileExt
} else {
case Zstd:
newRemote = remote + "." + int64ToBase64(size) + zstdFileExt
default:
newRemote = remote + uncompressedFileExt
}

return newRemote
}
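The data-file name embeds the original size as an 11-character base64 token (matching nameRegexp above) before the mode's extension. A standalone sketch of that scheme; the exact byte order inside the package's int64ToBase64 helper is an assumption here, not taken from this diff:

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// int64ToBase64 sketches the size-in-name encoding: 8 bytes of the size
// (byte order assumed), RawURL base64 with no padding, giving the 11
// [A-Za-z0-9-_] characters that nameRegexp expects.
func int64ToBase64(n int64) string {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(n))
	return base64.RawURLEncoding.EncodeToString(b)
}

func main() {
	fmt.Println("file.txt." + int64ToBase64(1234) + ".gz")  // gzip data name
	fmt.Println("file.txt." + int64ToBase64(1234) + ".zst") // zstd data name
	fmt.Println("file.txt.bin")                             // uncompressed data name
}
```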

@@ -304,7 +379,7 @@ func (f *Fs) dataName(remote string, size int64, compressed bool) (name string)

// addData parses an object and adds it to the DirEntries
func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
origFileName, _, size, err := processFileName(o.Remote())
origFileName, _, size, err := processFileName(o.Remote(), f.modeHandler)
if err != nil {
fs.Errorf(o, "Error on parsing file name: %v", err)
return
@@ -427,8 +502,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err)
}
size, err := f.modeHandler.newObjectGetOriginalSize(meta)
if err != nil {
return nil, fmt.Errorf("error reading metadata: %w", err)
}
// Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
o, err := f.Fs.NewObject(ctx, makeDataName(remote, size, meta.Mode))
if err != nil {
return nil, err
}
@@ -437,7 +516,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// checkCompressAndType checks if an object is compressible and determines its mime type
// returns a multireader with the bytes that were read to determine mime type
func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool, mimeType string, err error) {
func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compressionModeHandler) (newReader io.Reader, compressible bool, mimeType string, err error) {
in, wrap := accounting.UnWrap(in)
buf := make([]byte, heuristicBytes)
n, err := in.Read(buf)
@@ -446,7 +525,7 @@ func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool,
return nil, false, "", err
}
mime := mimetype.Detect(buf)
compressible, err = isCompressible(bytes.NewReader(buf))
compressible, err = modeHandler.isCompressible(bytes.NewReader(buf), compressionMode)
if err != nil {
return nil, false, "", err
}
@@ -454,26 +533,6 @@ func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool,
return wrap(in), compressible, mime.String(), nil
}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func isCompressible(r io.Reader) (bool, error) {
var b bytes.Buffer
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
if err != nil {
return false, err
}
n, err := io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}

// verifyObjectHash verifies the Objects hash
func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
srcHash := hasher.Sums()[ht]
@@ -494,9 +553,9 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul

type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)

type compressionResult struct {
type compressionResult[T sgzip.GzipMetadata | SzstdMetadata] struct {
err error
meta sgzip.GzipMetadata
meta T
}

// replicating some of operations.Rcat functionality because we want to support remotes without streaming
@@ -537,106 +596,18 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return nil, fmt.Errorf("failed to write temporary local file: %w", err)
}
if _, err = tempFile.Seek(0, 0); err != nil {
return nil, err
return nil, fmt.Errorf("failed to seek temporary local file: %w", err)
}
finfo, err := tempFile.Stat()
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to stat temporary local file: %w", err)
}
return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
}

// Put a compressed version of a file. Returns a wrappable object and metadata.
func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)

// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)

// Compress the file
pipeReader, pipeWriter := io.Pipe()
results := make(chan compressionResult)
go func() {
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
if err != nil {
results <- compressionResult{err: err, meta: sgzip.GzipMetadata{}}
return
}
_, err = io.Copy(gz, in)
gzErr := gz.Close()
if gzErr != nil {
fs.Errorf(nil, "Failed to close compress: %v", gzErr)
if err == nil {
err = gzErr
}
}
closeErr := pipeWriter.Close()
if closeErr != nil {
fs.Errorf(nil, "Failed to close pipe: %v", closeErr)
if err == nil {
err = closeErr
}
}
results <- compressionResult{err: err, meta: gz.MetaData()}
}()
wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering

// Find a hash the destination supports to compute a hash of
// the compressed data.
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
// unwrap the accounting again
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
// add the hasher and re-wrap the accounting
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}

// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
if removeErr != nil {
fs.Errorf(o, "Failed to remove partially transferred object: %v", err)
}
}
return nil, nil, err
}
// Check whether we got an error during compression
result := <-results
err = result.err
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
if removeErr != nil {
fs.Errorf(o, "Failed to remove partially compressed object: %v", err)
}
}
return nil, nil, err
}

// Generate metadata
meta := newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)

// Check the hashes of the compressed data if we were comparing them
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}

return o, meta, nil
return f.modeHandler.putCompress(ctx, f, in, src, options, mimeType)
}

// Put an uncompressed version of a file. Returns a wrappable object and metadata.
@@ -680,7 +651,8 @@ func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if err != nil {
return nil, nil, err
}
return o, newMetadata(o.Size(), Uncompressed, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil

return f.modeHandler.putUncompressGetNewMetadata(o, Uncompressed, hex.EncodeToString(sum), mimeType, sum)
}

// This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object.
@@ -751,7 +723,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
o, err := f.NewObject(ctx, src.Remote())
if err == fs.ErrorObjectNotFound {
// Get our file compressibility
in, compressible, mimeType, err := checkCompressAndType(in)
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
if err != nil {
return nil, err
}
@@ -771,7 +743,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
found := err == nil

in, compressible, mimeType, err := checkCompressAndType(in)
in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
if err != nil {
return nil, err
}
@@ -1090,11 +1062,12 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration

// ObjectMetadata describes the metadata for an Object.
type ObjectMetadata struct {
Mode int // Compression mode of the file.
Size int64 // Size of the object.
MD5 string // MD5 hash of the file.
MimeType string // Mime type of the file
CompressionMetadata sgzip.GzipMetadata
Mode int // Compression mode of the file.
Size int64 // Size of the object.
MD5 string // MD5 hash of the file.
MimeType string // Mime type of the file
CompressionMetadataGzip *sgzip.GzipMetadata // Metadata for Gzip compression
CompressionMetadataZstd *SzstdMetadata // Metadata for Zstd compression
}

// Object with external metadata
@@ -1107,17 +1080,6 @@ type Object struct {
meta *ObjectMetadata // Metadata struct for this object (nil if not loaded)
}

// This function generates a metadata object
func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mimeType string) *ObjectMetadata {
meta := new(ObjectMetadata)
meta.Size = size
meta.Mode = mode
meta.CompressionMetadata = cmeta
meta.MD5 = md5
meta.MimeType = mimeType
return meta
}

// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
// Open our metadata object
@@ -1165,7 +1127,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.mo, o.mo.Update(ctx, in, src, options...)
}

in, compressible, mimeType, err := checkCompressAndType(in)
in, compressible, mimeType, err := checkCompressAndType(in, o.meta.Mode, o.f.modeHandler)
if err != nil {
return err
}
@@ -1278,7 +1240,7 @@ func (o *Object) String() string {

// Remote returns the remote path
func (o *Object) Remote() string {
origFileName, _, _, err := processFileName(o.Object.Remote())
origFileName, _, _, err := processFileName(o.Object.Remote(), o.f.modeHandler)
if err != nil {
fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
return o.Object.Remote()
@@ -1381,7 +1343,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
return o.Object.Open(ctx, options...)
}
// Get offset and limit from OpenOptions, pass the rest to the underlying remote
var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
@@ -1389,31 +1350,12 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
openOptions = append(openOptions, option)
}
}
// Get a chunkedreader for the wrapped object
chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
// Get file handle
var file io.Reader
if offset != 0 {
file, err = sgzip.NewReaderAt(chunkedReader, &o.meta.CompressionMetadata, offset)
} else {
file, err = sgzip.NewReader(chunkedReader)
}
if err != nil {
return nil, err
}

var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: chunkedReader}, nil
var retCloser io.Closer = chunkedReader
return o.f.modeHandler.openGetReadCloser(ctx, o, offset, limit, chunkedReader, retCloser, options...)
}

// ObjectInfo describes a wrapped fs.ObjectInfo for being the source

@@ -48,7 +48,27 @@ func TestRemoteGzip(t *testing.T) {
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
{Name: name, Key: "mode", Value: "gzip"},
{Name: name, Key: "level", Value: "-1"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
}

// TestRemoteZstd tests ZSTD compression
func TestRemoteZstd(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
name := "TestCompressZstd"
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "mode", Value: "zstd"},
{Name: name, Key: "level", Value: "2"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)

207 backend/compress/gzip_handler.go Normal file
@@ -0,0 +1,207 @@
package compress

import (
"bufio"
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"errors"
"io"

"github.com/buengese/sgzip"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunkedreader"
"github.com/rclone/rclone/fs/hash"
)

// gzipModeHandler implements compressionModeHandler for gzip
type gzipModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
var b bytes.Buffer
var n int64
w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
if err != nil {
return false, err
}
n, err = io.Copy(w, r)
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
if meta.CompressionMetadataGzip == nil {
return 0, errors.New("missing gzip metadata")
}
return meta.CompressionMetadataGzip.Size, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (g *gzipModeHandler) openGetReadCloser(
ctx context.Context,
o *Object,
offset int64,
limit int64,
cr chunkedreader.ChunkedReader,
closer io.Closer,
options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
var file io.Reader

if offset != 0 {
file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
} else {
file, err = sgzip.NewReader(cr)
}
if err != nil {
return nil, err
}

var fileReader io.Reader
if limit != -1 {
fileReader = io.LimitReader(file, limit)
} else {
fileReader = file
}
// Return a ReadCloser
return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
if compressionMode == Gzip {
return gzFileExt
}

return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (g *gzipModeHandler) putCompress(
ctx context.Context,
f *Fs,
in io.Reader,
src fs.ObjectInfo,
options []fs.OpenOption,
mimeType string,
) (fs.Object, *ObjectMetadata, error) {
// Unwrap reader accounting
in, wrap := accounting.UnWrap(in)

// Add the metadata hasher
metaHasher := md5.New()
in = io.TeeReader(in, metaHasher)

// Compress the file
pipeReader, pipeWriter := io.Pipe()

resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
go func() {
gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
if err != nil {
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
close(resultsGzip)
return
}
_, err = io.Copy(gz, in)
gzErr := gz.Close()
if gzErr != nil && err == nil {
err = gzErr
}
closeErr := pipeWriter.Close()
if closeErr != nil && err == nil {
err = closeErr
}
resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
close(resultsGzip)
}()

wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has its own buffering

// Find a hash the destination supports to compute a hash of
// the compressed data.
ht := f.Fs.Hashes().GetOne()
var hasher *hash.MultiHasher
var err error
if ht != hash.None {
// unwrap the accounting again
wrappedIn, wrap = accounting.UnWrap(wrappedIn)
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
if err != nil {
return nil, nil, err
}
// add the hasher and re-wrap the accounting
wrappedIn = io.TeeReader(wrappedIn, hasher)
wrappedIn = wrap(wrappedIn)
}

// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
if err != nil {
if o != nil {
if removeErr := o.Remove(ctx); removeErr != nil {
fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
}
}
return nil, nil, err
}
// Check whether we got an error during compression
result := <-resultsGzip
if result.err != nil {
if o != nil {
if removeErr := o.Remove(ctx); removeErr != nil {
fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
}
}
return nil, nil, result.err
}

// Generate metadata
meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)

// Check the hashes of the compressed data if we were comparing them
if ht != hash.None && hasher != nil {
err = f.verifyObjectHash(ctx, o, hasher, ht)
if err != nil {
return nil, nil, err
}
}
return o, meta, nil
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
meta, ok := cmeta.(sgzip.GzipMetadata)
if !ok {
panic("invalid cmeta type: expected sgzip.GzipMetadata")
}

objMeta := new(ObjectMetadata)
objMeta.Size = size
objMeta.Mode = mode
objMeta.CompressionMetadataGzip = &meta
objMeta.CompressionMetadataZstd = nil
objMeta.MD5 = md5
objMeta.MimeType = mimeType

return objMeta
}

327 backend/compress/szstd_helper.go Normal file
@@ -0,0 +1,327 @@
package compress

import (
"context"
"errors"
"io"
"runtime"
"sync"

szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
"github.com/klauspost/compress/zstd"
)

const szstdChunkSize int = 1 << 20 // 1 MiB chunk size

// SzstdMetadata holds metadata for szstd compressed files.
type SzstdMetadata struct {
BlockSize int // BlockSize is the size of the blocks in the zstd file
Size int64 // Size is the uncompressed size of the file
BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
}

// SzstdWriter is a writer that compresses data in szstd format.
type SzstdWriter struct {
enc *zstd.Encoder
w szstd.ConcurrentWriter
metadata SzstdMetadata
mu sync.Mutex
}

// NewWriterSzstd creates a new szstd writer with the specified options.
// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
encoder, err := zstd.NewWriter(nil, opts...)
if err != nil {
return nil, err
}

sw, err := szstd.NewWriter(w, encoder)
if err != nil {
if err := encoder.Close(); err != nil {
return nil, err
}
return nil, err
}

return &SzstdWriter{
enc: encoder,
w: sw,
metadata: SzstdMetadata{
BlockSize: szstdChunkSize,
Size: 0,
},
}, nil
}

// Write writes data to the szstd writer in chunks of szstdChunkSize.
// It handles the block size and metadata updates automatically.
func (w *SzstdWriter) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}

if w.metadata.BlockData == nil {
numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
w.metadata.BlockData[0] = 0
}

start := 0
total := len(p)

var writerFunc szstd.FrameSource = func() ([]byte, error) {
if start >= total {
return nil, nil
}

end := min(start+w.metadata.BlockSize, total)
chunk := p[start:end]
size := end - start

w.mu.Lock()
w.metadata.Size += int64(size)
w.mu.Unlock()

start = end
return chunk, nil
}

// write sizes of compressed blocks in the callback
err := w.w.WriteMany(context.Background(), writerFunc,
szstd.WithWriteCallback(func(size uint32) {
w.mu.Lock()
lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
w.mu.Unlock()
}),
)
if err != nil {
return 0, err
}

return total, nil
}

// Close closes the SzstdWriter and its underlying encoder.
func (w *SzstdWriter) Close() error {
if err := w.w.Close(); err != nil {
return err
}
if err := w.enc.Close(); err != nil {
return err
}

return nil
}

// GetMetadata returns the metadata of the szstd writer.
func (w *SzstdWriter) GetMetadata() SzstdMetadata {
return w.metadata
}
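A minimal usage sketch of the writer above, based solely on the API shown in this diff; the encoder level option comes from klauspost/compress/zstd, and this is a fragment assuming bytes, fmt, and zstd are imported:

```go
// Hypothetical usage of SzstdWriter, compressing a buffer in memory.
var buf bytes.Buffer
w, err := NewWriterSzstd(&buf, zstd.WithEncoderLevel(zstd.SpeedDefault))
if err != nil {
	panic(err)
}
if _, err := w.Write(make([]byte, 3<<20)); err != nil { // three 1 MiB blocks
	panic(err)
}
if err := w.Close(); err != nil {
	panic(err)
}
meta := w.GetMetadata()
fmt.Println(meta.Size, len(meta.BlockData)-1) // uncompressed size, block count
```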

// SzstdReaderAt is a reader that allows random access in szstd compressed data.
type SzstdReaderAt struct {
r szstd.Reader
decoder *zstd.Decoder
metadata *SzstdMetadata
pos int64
mu sync.Mutex
}

// NewReaderAtSzstd creates a new SzstdReaderAt at the specified io.ReadSeeker.
func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
decoder, err := zstd.NewReader(nil, opts...)
if err != nil {
return nil, err
}

r, err := szstd.NewReader(rs, decoder)
if err != nil {
decoder.Close()
return nil, err
}

sr := &SzstdReaderAt{
r: r,
decoder: decoder,
metadata: meta,
pos: 0,
}

// Set initial position to the provided offset
if _, err := sr.Seek(offset, io.SeekStart); err != nil {
if err := sr.Close(); err != nil {
return nil, err
}
return nil, err
}

return sr, nil
}

// Seek sets the offset for the next Read.
func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
s.mu.Lock()
defer s.mu.Unlock()

pos, err := s.r.Seek(offset, whence)
if err == nil {
s.pos = pos
}
return pos, err
}

func (s *SzstdReaderAt) Read(p []byte) (int, error) {
s.mu.Lock()
defer s.mu.Unlock()

n, err := s.r.Read(p)
if err == nil {
s.pos += int64(n)
}
return n, err
}

// ReadAt reads data at the specified offset.
func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
if off < 0 {
return 0, errors.New("invalid offset")
}
if off >= s.metadata.Size {
return 0, io.EOF
}

endOff := min(off+int64(len(p)), s.metadata.Size)

// Find all blocks covered by the range
type blockInfo struct {
index int // Block index
offsetInBlock int64 // Offset within the block for starting reading
bytesToRead int64 // How many bytes to read from this block
}

var blocks []blockInfo
uncompressedOffset := int64(0)
currentOff := off

for i := 0; i < len(s.metadata.BlockData)-1; i++ {
blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)

if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
offsetInBlock := max(0, currentOff-uncompressedOffset)
bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)

blocks = append(blocks, blockInfo{
index: i,
offsetInBlock: offsetInBlock,
bytesToRead: bytesToRead,
})

currentOff += bytesToRead
if currentOff >= endOff {
break
}
}
uncompressedOffset = blockUncompressedEnd
}

if len(blocks) == 0 {
return 0, io.EOF
}

// Parallel block decoding
type decodeResult struct {
index int
data []byte
err error
}

resultCh := make(chan decodeResult, len(blocks))
var wg sync.WaitGroup
sem := make(chan struct{}, runtime.NumCPU())

for _, block := range blocks {
wg.Add(1)
go func(block blockInfo) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()

startOffset := int64(s.metadata.BlockData[block.index])
endOffset := int64(s.metadata.BlockData[block.index+1])
compressedSize := endOffset - startOffset

compressed := make([]byte, compressedSize)
_, err := s.r.ReadAt(compressed, startOffset)
if err != nil && err != io.EOF {
resultCh <- decodeResult{index: block.index, err: err}
return
}

decoded, err := s.decoder.DecodeAll(compressed, nil)
if err != nil {
resultCh <- decodeResult{index: block.index, err: err}
return
}

resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
}(block)
}

go func() {
wg.Wait()
close(resultCh)
}()

// Collect results in block index order
totalRead := 0
results := make(map[int]decodeResult)
expected := len(blocks)
minIndex := blocks[0].index

for res := range resultCh {
results[res.index] = res
for {
if result, ok := results[minIndex]; ok {
if result.err != nil {
return 0, result.err
}
// find the corresponding blockInfo
var blk blockInfo
for _, b := range blocks {
if b.index == result.index {
blk = b
break
}
}

start := blk.offsetInBlock
end := start + blk.bytesToRead
copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
totalRead += int(blk.bytesToRead)
minIndex++
if minIndex-blocks[0].index >= len(blocks) {
break
}
} else {
break
}
}
if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
break
}
}

return totalRead, nil
}

// Close closes the SzstdReaderAt and underlying decoder.
func (s *SzstdReaderAt) Close() error {
if err := s.r.Close(); err != nil {
return err
}
s.decoder.Close()
return nil
}
|
||||
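Editor's note: to make the seekable-zstd plumbing above concrete, here is a minimal round-trip sketch. It assumes the `SzstdWriter`/`SzstdReaderAt` API exactly as shown in this diff (plus `bytes`, `fmt`, `io` and `strings` imports); it is illustration only, not part of the commit.

```go
// Sketch: compress into a buffer, then read a range back at random.
func szstdRoundTrip() error {
	var buf bytes.Buffer
	w, err := NewWriterSzstd(&buf, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, strings.NewReader("hello seekable zstd")); err != nil {
		return err
	}
	if err := w.Close(); err != nil { // flush frames and finalise BlockData
		return err
	}
	meta := w.GetMetadata()

	// Random access without decompressing the whole stream.
	r, err := NewReaderAtSzstd(bytes.NewReader(buf.Bytes()), &meta, 0)
	if err != nil {
		return err
	}
	defer func() { _ = r.Close() }()

	p := make([]byte, 8)
	n, err := r.ReadAt(p, 6) // 8 bytes from uncompressed offset 6
	if err != nil && err != io.EOF {
		return err
	}
	fmt.Printf("%q\n", p[:n]) // "seekable"
	return nil
}
```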
backend/compress/uncompressed_handler.go (new file, 65 lines)
@@ -0,0 +1,65 @@
package compress

import (
	"context"
	"fmt"
	"io"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunkedreader"
)

// uncompressedModeHandler implements compressionModeHandler for uncompressed files
type uncompressedModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	return false, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	return 0, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (u *uncompressedModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	return o.Object.Open(ctx, options...)
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (u *uncompressedModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	return nil
}
backend/compress/unknown_handler.go (new file, 65 lines)
@@ -0,0 +1,65 @@
package compress

import (
	"context"
	"fmt"
	"io"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/chunkedreader"
)

// unknownModeHandler implements compressionModeHandler for unknown compression types
type unknownModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	return false, fmt.Errorf("unknown compression mode %d", compressionMode)
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	return 0, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (unk *unknownModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (unk *unknownModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return nil, nil, fmt.Errorf("unknown compression mode")
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	return nil
}
backend/compress/zstd_handler.go (new file, 192 lines)
@@ -0,0 +1,192 @@
package compress

import (
	"bufio"
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"io"

	"github.com/klauspost/compress/zstd"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/chunkedreader"
	"github.com/rclone/rclone/fs/hash"
)

// zstdModeHandler implements compressionModeHandler for zstd
type zstdModeHandler struct{}

// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
// the configured threshold
func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
	var b bytes.Buffer
	var n int64
	w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
	if err != nil {
		return false, err
	}
	n, err = io.Copy(w, r)
	if err != nil {
		return false, err
	}
	err = w.Close()
	if err != nil {
		return false, err
	}
	ratio := float64(n) / float64(b.Len())
	return ratio > minCompressionRatio, nil
}

// newObjectGetOriginalSize returns the original file size from the metadata
func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
	if meta.CompressionMetadataZstd == nil {
		return 0, errors.New("missing zstd metadata")
	}
	return meta.CompressionMetadataZstd.Size, nil
}

// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
func (z *zstdModeHandler) openGetReadCloser(
	ctx context.Context,
	o *Object,
	offset int64,
	limit int64,
	cr chunkedreader.ChunkedReader,
	closer io.Closer,
	options ...fs.OpenOption,
) (rc io.ReadCloser, err error) {
	var file io.Reader

	if offset != 0 {
		file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
	} else {
		file, err = zstd.NewReader(cr)
	}
	if err != nil {
		return nil, err
	}

	var fileReader io.Reader
	if limit != -1 {
		fileReader = io.LimitReader(file, limit)
	} else {
		fileReader = file
	}
	// Return a ReadCloser
	return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
}

// processFileNameGetFileExtension returns the file extension for the given compression mode
func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
	if compressionMode == Zstd {
		return zstdFileExt
	}

	return ""
}

// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
func (z *zstdModeHandler) putCompress(
	ctx context.Context,
	f *Fs,
	in io.Reader,
	src fs.ObjectInfo,
	options []fs.OpenOption,
	mimeType string,
) (fs.Object, *ObjectMetadata, error) {
	// Unwrap reader accounting
	in, wrap := accounting.UnWrap(in)

	// Add the metadata hasher
	metaHasher := md5.New()
	in = io.TeeReader(in, metaHasher)

	// Compress the file
	pipeReader, pipeWriter := io.Pipe()

	resultsZstd := make(chan compressionResult[SzstdMetadata])
	go func() {
		writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
		if err != nil {
			resultsZstd <- compressionResult[SzstdMetadata]{err: err}
			close(resultsZstd)
			return
		}
		_, err = io.Copy(writer, in)
		if wErr := writer.Close(); wErr != nil && err == nil {
			err = wErr
		}
		if cErr := pipeWriter.Close(); cErr != nil && err == nil {
			err = cErr
		}

		resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
		close(resultsZstd)
	}()

	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))

	ht := f.Fs.Hashes().GetOne()
	var hasher *hash.MultiHasher
	var err error
	if ht != hash.None {
		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
		if err != nil {
			return nil, nil, err
		}
		wrappedIn = io.TeeReader(wrappedIn, hasher)
		wrappedIn = wrap(wrappedIn)
	}

	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
	if err != nil {
		return nil, nil, err
	}

	result := <-resultsZstd
	if result.err != nil {
		if o != nil {
			_ = o.Remove(ctx)
		}
		return nil, nil, result.err
	}

	// Build metadata using uncompressed size for filename
	meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
	if ht != hash.None && hasher != nil {
		err = f.verifyObjectHash(ctx, o, hasher, ht)
		if err != nil {
			return nil, nil, err
		}
	}
	return o, meta, nil
}

// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
	return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
}

// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
// Warning: This function panics if cmeta is not of the expected type.
func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
	meta, ok := cmeta.(SzstdMetadata)
	if !ok {
		panic("invalid cmeta type: expected SzstdMetadata")
	}

	objMeta := new(ObjectMetadata)
	objMeta.Size = size
	objMeta.Mode = mode
	objMeta.CompressionMetadataGzip = nil
	objMeta.CompressionMetadataZstd = &meta
	objMeta.MD5 = md5
	objMeta.MimeType = mimeType

	return objMeta
}
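Editor's note: the heart of `putCompress` above is compressing while uploading through an `io.Pipe`. A hedged, standalone sketch of that shape follows; the `compressStream` name and `upload` callback are illustrative only, while `io.Pipe` and the `klauspost/compress/zstd` encoder are the real building blocks used in the diff.

```go
// compressStream compresses `in` on the fly while `upload` consumes the read
// side of the pipe - the same shape as putCompress above. Sketch only.
func compressStream(in io.Reader, upload func(io.Reader) error) error {
	pr, pw := io.Pipe()
	errCh := make(chan error, 1)
	go func() {
		enc, err := zstd.NewWriter(pw) // *zstd.Encoder from klauspost/compress
		if err == nil {
			_, err = io.Copy(enc, in)
			if cErr := enc.Close(); cErr != nil && err == nil {
				err = cErr
			}
		}
		_ = pw.CloseWithError(err) // unblocks the reader on failure
		errCh <- err
	}()
	if err := upload(pr); err != nil {
		return err
	}
	return <-errCh // surface any compression-side error after upload returns
}
```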
@@ -923,28 +923,30 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 var commandHelp = []fs.CommandHelp{
 	{
 		Name:  "encode",
-		Short: "Encode the given filename(s)",
+		Short: "Encode the given filename(s).",
 		Long: `This encodes the filenames given as arguments returning a list of
 strings of the encoded results.
 
-Usage Example:
+Usage examples:
 
-    rclone backend encode crypt: file1 [file2...]
-    rclone rc backend/command command=encode fs=crypt: file1 [file2...]
-`,
+` + "```console" + `
+rclone backend encode crypt: file1 [file2...]
+rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+` + "```",
 	},
 	{
 		Name:  "decode",
-		Short: "Decode the given filename(s)",
+		Short: "Decode the given filename(s).",
 		Long: `This decodes the filenames given as arguments returning a list of
 strings of the decoded results. It will return an error if any of the
 inputs are invalid.
 
-Usage Example:
+Usage examples:
 
-    rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
-    rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
-`,
+` + "```console" + `
+rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
+rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+` + "```",
 	},
 }
@@ -563,21 +563,26 @@ var commandHelp = []fs.CommandHelp{{
 	Short: "Show metadata about the DOI.",
 	Long: `This command returns a JSON object with some information about the DOI.
 
-    rclone backend medatadata doi:
-
-It returns a JSON object representing metadata about the DOI.
-`,
+Usage example:
+
+` + "```console" + `
+rclone backend metadata doi:
+` + "```" + `
+
+It returns a JSON object representing metadata about the DOI.`,
 }, {
 	Name:  "set",
 	Short: "Set command for updating the config parameters.",
 	Long: `This set command can be used to update the config parameters
 for a running doi backend.
 
-Usage Examples:
+Usage examples:
 
-    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
+` + "```console" + `
+rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
+` + "```" + `
 
 The option keys are named as they are in the config file.
 
@@ -585,8 +590,7 @@ This rebuilds the connection to the doi backend when it is called with
 the new parameters. Only new parameters need be passed as the values
 will default to those currently in use.
 
-It doesn't return anything.
-`,
+It doesn't return anything.`,
 }}
 
 // Command the backend to run a named command
@@ -1965,9 +1965,28 @@ func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
+	list := list.NewHelper(callback)
+	entriesAdded := 0
 	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	directoryID = actualID(directoryID)
 
@@ -1979,25 +1998,30 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			return true
 		}
 		if entry != nil {
-			entries = append(entries, entry)
+			err = list.Add(entry)
+			if err != nil {
+				iErr = err
+				return true
+			}
+			entriesAdded++
 		}
 		return false
 	})
 	if err != nil {
-		return nil, err
+		return err
 	}
 	if iErr != nil {
-		return nil, iErr
+		return iErr
 	}
 	// If listing the root of a teamdrive and got no entries,
 	// double check we have access
-	if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
+	if f.isTeamDrive && entriesAdded == 0 && f.root == "" && dir == "" {
 		err = f.teamDriveOK(ctx)
 		if err != nil {
-			return nil, err
+			return err
 		}
 	}
-	return entries, nil
+	return list.Flush()
 }
 
 // listREntry is a task to be executed by a listRRunner
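Editor's note: the same List-to-ListP adaptation recurs across several backends in this set of commits. A hedged sketch of the shape (the `fetchEntries` helper is hypothetical; `list.WithListP`, `list.NewHelper`, `Add` and `Flush` are the real helpers used in the diff):

```go
// List keeps the old slice-returning API by collecting ListP's callbacks.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries to the callback in tranches instead of building
// one big slice in memory.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	helper := list.NewHelper(callback)
	entries, err := f.fetchEntries(ctx, dir) // hypothetical backend-specific fetch
	if err != nil {
		return err
	}
	for _, entry := range entries {
		if err := helper.Add(entry); err != nil {
			return err
		}
	}
	return helper.Flush() // send any buffered tail
}
```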
@@ -3640,41 +3664,47 @@ func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error)
 
 var commandHelp = []fs.CommandHelp{{
 	Name:  "get",
-	Short: "Get command for fetching the drive config parameters",
-	Long: `This is a get command which will be used to fetch the various drive config parameters
+	Short: "Get command for fetching the drive config parameters.",
+	Long: `This is a get command which will be used to fetch the various drive config
+parameters.
 
-Usage Examples:
+Usage examples:
 
-    rclone backend get drive: [-o service_account_file] [-o chunk_size]
-    rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
-`,
+` + "```console" + `
+rclone backend get drive: [-o service_account_file] [-o chunk_size]
+rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
+` + "```",
 	Opts: map[string]string{
-		"chunk_size":           "show the current upload chunk size",
-		"service_account_file": "show the current service account file",
+		"chunk_size":           "Show the current upload chunk size.",
+		"service_account_file": "Show the current service account file.",
 	},
 }, {
 	Name:  "set",
-	Short: "Set command for updating the drive config parameters",
-	Long: `This is a set command which will be used to update the various drive config parameters
+	Short: "Set command for updating the drive config parameters.",
+	Long: `This is a set command which will be used to update the various drive config
+parameters.
 
-Usage Examples:
+Usage examples:
 
-    rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
-    rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
-`,
+` + "```console" + `
+rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+` + "```",
 	Opts: map[string]string{
-		"chunk_size":           "update the current upload chunk size",
-		"service_account_file": "update the current service account file",
+		"chunk_size":           "Update the current upload chunk size.",
+		"service_account_file": "Update the current service account file.",
 	},
 }, {
 	Name:  "shortcut",
-	Short: "Create shortcuts from files or directories",
+	Short: "Create shortcuts from files or directories.",
 	Long: `This command creates shortcuts from files or directories.
 
-Usage:
+Usage examples:
 
-    rclone backend shortcut drive: source_item destination_shortcut
-    rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
+` + "```console" + `
+rclone backend shortcut drive: source_item destination_shortcut
+rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
+` + "```" + `
 
 In the first example this creates a shortcut from the "source_item"
 which can be a file or a directory to the "destination_shortcut". The
@@ -3684,90 +3714,100 @@ from "drive:"
 
 In the second example this creates a shortcut from the "source_item"
 relative to "drive:" to the "destination_shortcut" relative to
 "drive2:". This may fail with a permission error if the user
-authenticated with "drive2:" can't read files from "drive:".
-`,
+authenticated with "drive2:" can't read files from "drive:".`,
 	Opts: map[string]string{
-		"target": "optional target remote for the shortcut destination",
+		"target": "Optional target remote for the shortcut destination.",
 	},
 }, {
 	Name:  "drives",
-	Short: "List the Shared Drives available to this account",
+	Short: "List the Shared Drives available to this account.",
 	Long: `This command lists the Shared Drives (Team Drives) available to this
 account.
 
-Usage:
+Usage example:
 
-    rclone backend [-o config] drives drive:
+` + "```console" + `
+rclone backend [-o config] drives drive:
+` + "```" + `
 
-This will return a JSON list of objects like this
+This will return a JSON list of objects like this:
 
-    [
-        {
-            "id": "0ABCDEF-01234567890",
-            "kind": "drive#teamDrive",
-            "name": "My Drive"
-        },
-        {
-            "id": "0ABCDEFabcdefghijkl",
-            "kind": "drive#teamDrive",
-            "name": "Test Drive"
-        }
-    ]
+` + "```json" + `
+[
+    {
+        "id": "0ABCDEF-01234567890",
+        "kind": "drive#teamDrive",
+        "name": "My Drive"
+    },
+    {
+        "id": "0ABCDEFabcdefghijkl",
+        "kind": "drive#teamDrive",
+        "name": "Test Drive"
+    }
+]
+` + "```" + `
 
 With the -o config parameter it will output the list in a format
 suitable for adding to a config file to make aliases for all the
 drives found and a combined drive.
 
-    [My Drive]
-    type = alias
-    remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
+` + "```ini" + `
+[My Drive]
+type = alias
+remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
 
-    [Test Drive]
-    type = alias
-    remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
+[Test Drive]
+type = alias
+remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
 
-    [AllDrives]
-    type = combine
-    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+[AllDrives]
+type = combine
+upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
+` + "```" + `
 
 Adding this to the rclone config file will cause those team drives to
 be accessible with the aliases shown. Any illegal characters will be
 substituted with "_" and duplicate names will have numbers suffixed.
 It will also add a remote called AllDrives which shows all the shared
-drives combined into one directory tree.
-`,
+drives combined into one directory tree.`,
 }, {
 	Name:  "untrash",
-	Short: "Untrash files and directories",
+	Short: "Untrash files and directories.",
 	Long: `This command untrashes all the files and directories in the directory
 passed in recursively.
 
-Usage:
+Usage example:
 
+` + "```console" + `
+rclone backend untrash drive:directory
+rclone backend --interactive untrash drive:directory subdir
+` + "```" + `
+
 This takes an optional directory to trash which makes this easier to
 use via the API.
 
-    rclone backend untrash drive:directory
-    rclone backend --interactive untrash drive:directory subdir
-
-Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
+Use the --interactive/-i or --dry-run flag to see what would be restored before
+restoring it.
 
 Result:
 
-    {
-        "Untrashed": 17,
-        "Errors": 0
-    }
-`,
+` + "```json" + `
+{
+    "Untrashed": 17,
+    "Errors": 0
+}
+` + "```",
}, {
 	Name:  "copyid",
-	Short: "Copy files by ID",
-	Long: `This command copies files by ID
+	Short: "Copy files by ID.",
+	Long: `This command copies files by ID.
 
-Usage:
+Usage examples:
 
-    rclone backend copyid drive: ID path
-    rclone backend copyid drive: ID1 path1 ID2 path2
+` + "```console" + `
+rclone backend copyid drive: ID path
+rclone backend copyid drive: ID1 path1 ID2 path2
+` + "```" + `
 
 It copies the drive file with ID given to the path (an rclone path which
 will be passed internally to rclone copyto). The ID and path pairs can be
@@ -3780,17 +3820,19 @@ component will be used as the file name.
 If the destination is a drive backend then server-side copying will be
 attempted if possible.
 
-Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
-`,
+Use the --interactive/-i or --dry-run flag to see what would be copied before
+copying.`,
 }, {
 	Name:  "moveid",
-	Short: "Move files by ID",
-	Long: `This command moves files by ID
+	Short: "Move files by ID.",
+	Long: `This command moves files by ID.
 
-Usage:
+Usage examples:
 
-    rclone backend moveid drive: ID path
-    rclone backend moveid drive: ID1 path1 ID2 path2
+` + "```console" + `
+rclone backend moveid drive: ID path
+rclone backend moveid drive: ID1 path1 ID2 path2
+` + "```" + `
 
 It moves the drive file with ID given to the path (an rclone path which
 will be passed internally to rclone moveto).
|
||||
If the destination is a drive backend then server-side moving will be
|
||||
attempted if possible.
|
||||
|
||||
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
|
||||
`,
|
||||
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.`,
|
||||
}, {
|
||||
Name: "exportformats",
|
||||
Short: "Dump the export formats for debug purposes",
|
||||
Short: "Dump the export formats for debug purposes.",
|
||||
}, {
|
||||
Name: "importformats",
|
||||
Short: "Dump the import formats for debug purposes",
|
||||
Short: "Dump the import formats for debug purposes.",
|
||||
}, {
|
||||
Name: "query",
|
||||
Short: "List files using Google Drive query language",
|
||||
Long: `This command lists files based on a query
|
||||
Short: "List files using Google Drive query language.",
|
||||
Long: `This command lists files based on a query.
|
||||
|
||||
Usage:
|
||||
Usage example:
|
||||
|
||||
` + "```console" + `
|
||||
rclone backend query drive: query
|
||||
` + "```" + `
|
||||
|
||||
rclone backend query drive: query
|
||||
|
||||
The query syntax is documented at [Google Drive Search query terms and
|
||||
operators](https://developers.google.com/drive/api/guides/ref-search-terms).
|
||||
|
||||
For example:
|
||||
|
||||
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
|
||||
` + "```console" + `
|
||||
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
|
||||
` + "```" + `
|
||||
|
||||
If the query contains literal ' or \ characters, these need to be escaped with
|
||||
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
|
||||
file named "foo ' \.txt":
|
||||
|
||||
rclone backend query drive: "name = 'foo \' \\\.txt'"
|
||||
` + "```console" + `
|
||||
rclone backend query drive: "name = 'foo \' \\\.txt'"
|
||||
` + "```" + `
|
||||
|
||||
The result is a JSON array of matches, for example:
|
||||
|
||||
[
|
||||
{
|
||||
"createdTime": "2017-06-29T19:58:28.537Z",
|
||||
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
|
||||
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
|
||||
"mimeType": "text/plain",
|
||||
"modifiedTime": "2024-02-02T10:40:02.874Z",
|
||||
"name": "foo ' \\.txt",
|
||||
"parents": [
|
||||
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
|
||||
],
|
||||
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
|
||||
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
|
||||
"size": "311",
|
||||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||
}
|
||||
]`,
|
||||
` + "```json" + `
|
||||
[
|
||||
{
|
||||
"createdTime": "2017-06-29T19:58:28.537Z",
|
||||
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
|
||||
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
|
||||
"mimeType": "text/plain",
|
||||
"modifiedTime": "2024-02-02T10:40:02.874Z",
|
||||
"name": "foo ' \\.txt",
|
||||
"parents": [
|
||||
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
|
||||
],
|
||||
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
|
||||
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
|
||||
"size": "311",
|
||||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||
}
|
||||
]
|
||||
` + "```console",
|
||||
}, {
|
||||
Name: "rescue",
|
||||
Short: "Rescue or delete any orphaned files",
|
||||
Short: "Rescue or delete any orphaned files.",
|
||||
Long: `This command rescues or deletes any orphaned files or directories.
|
||||
|
||||
Sometimes files can get orphaned in Google Drive. This means that they
|
||||
@@ -3862,26 +3911,31 @@ are no longer in any folder in Google Drive.
|
||||
This command finds those files and either rescues them to a directory
|
||||
you specify or deletes them.
|
||||
|
||||
Usage:
|
||||
|
||||
This can be used in 3 ways.
|
||||
|
||||
First, list all orphaned files
|
||||
First, list all orphaned files:
|
||||
|
||||
rclone backend rescue drive:
|
||||
` + "```console" + `
|
||||
rclone backend rescue drive:
|
||||
` + "```" + `
|
||||
|
||||
Second rescue all orphaned files to the directory indicated
|
||||
Second rescue all orphaned files to the directory indicated:
|
||||
|
||||
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
||||
` + "```console" + `
|
||||
rclone backend rescue drive: "relative/path/to/rescue/directory"
|
||||
` + "```" + `
|
||||
|
||||
e.g. To rescue all orphans to a directory called "Orphans" in the top level
|
||||
E.g. to rescue all orphans to a directory called "Orphans" in the top level:
|
||||
|
||||
rclone backend rescue drive: Orphans
|
||||
` + "```console" + `
|
||||
rclone backend rescue drive: Orphans
|
||||
` + "```" + `
|
||||
|
||||
Third delete all orphaned files to the trash
|
||||
Third delete all orphaned files to the trash:
|
||||
|
||||
rclone backend rescue drive: -o delete
|
||||
`,
|
||||
` + "```console" + `
|
||||
rclone backend rescue drive: -o delete
|
||||
` + "```",
|
||||
}}
|
||||
|
||||
// Command the backend to run a named command
|
||||
@@ -4617,6 +4671,7 @@ var (
 	_ fs.PutUncheckeder  = (*Fs)(nil)
 	_ fs.PublicLinker    = (*Fs)(nil)
 	_ fs.ListRer         = (*Fs)(nil)
+	_ fs.ListPer         = (*Fs)(nil)
 	_ fs.MergeDirser     = (*Fs)(nil)
 	_ fs.DirSetModTimer  = (*Fs)(nil)
 	_ fs.MkdirMetadataer = (*Fs)(nil)
@@ -47,6 +47,7 @@ import (
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/list"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"

@@ -834,7 +835,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 
 // listSharedFolders lists all available shared folders mounted and not mounted
 // we'll need the id later so we have to return them in original format
-func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
+func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
 	started := false
 	var res *sharing.ListFoldersResult
 	for {

@@ -847,7 +848,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
 			return shouldRetry(ctx, err)
 		})
 		if err != nil {
-			return nil, err
+			return err
 		}
 		started = true
 	} else {

@@ -859,15 +860,15 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
 			return shouldRetry(ctx, err)
 		})
 		if err != nil {
-			return nil, fmt.Errorf("list continue: %w", err)
+			return fmt.Errorf("list continue: %w", err)
 		}
 	}
 	for _, entry := range res.Entries {
 		leaf := f.opt.Enc.ToStandardName(entry.Name)
 		d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
-		entries = append(entries, d)
+		err = callback(d)
 		if err != nil {
-			return nil, err
+			return err
 		}
 	}
 	if res.Cursor == "" {
@@ -875,21 +876,25 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
 		}
 	}
 
-	return entries, nil
+	return nil
 }
 
 // findSharedFolder finds the id for a given shared folder name
 // somewhat annoyingly there is no endpoint to query a shared folder by its name
 // so our only option is to iterate over all shared folders
 func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
-	entries, err := f.listSharedFolders(ctx)
-	if err != nil {
-		return "", err
-	}
-	for _, entry := range entries {
+	errFoundFile := errors.New("found file")
+	err = f.listSharedFolders(ctx, func(entry fs.DirEntry) error {
 		if entry.(*fs.Dir).Remote() == name {
-			return entry.(*fs.Dir).ID(), nil
+			id = entry.(*fs.Dir).ID()
+			return errFoundFile
 		}
+		return nil
+	})
+	if errors.Is(err, errFoundFile) {
+		return id, nil
+	} else if err != nil {
+		return "", err
 	}
 	return "", fs.ErrorDirNotFound
 }
@@ -908,7 +913,7 @@ func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
 
 // listReceivedFiles lists shared files the user has access to (note this means individual
 // files not files contained in shared folders)
-func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
+func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
 	started := false
 	var res *sharing.ListFilesResult
 	for {

@@ -921,7 +926,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
 			return shouldRetry(ctx, err)
 		})
 		if err != nil {
-			return nil, err
+			return err
 		}
 		started = true
 	} else {

@@ -933,7 +938,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
 			return shouldRetry(ctx, err)
 		})
 		if err != nil {
-			return nil, fmt.Errorf("list continue: %w", err)
+			return fmt.Errorf("list continue: %w", err)
 		}
 	}
 	for _, entry := range res.Entries {

@@ -946,26 +951,33 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
 			modTime: *entry.TimeInvited,
 		}
 		if err != nil {
-			return nil, err
+			return err
 		}
-		entries = append(entries, o)
+		err = callback(o)
+		if err != nil {
+			return err
+		}
 	}
 	if res.Cursor == "" {
 		break
 	}
 }
-	return entries, nil
+	return nil
 }
 
 func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
-	files, err := f.listReceivedFiles(ctx)
-	if err != nil {
-		return nil, err
-	}
-	for _, entry := range files {
+	errFoundFile := errors.New("found file")
+	err = f.listReceivedFiles(ctx, func(entry fs.DirEntry) error {
 		if entry.(*Object).remote == name {
-			return entry.(*Object), nil
+			o = entry.(*Object)
+			return errFoundFile
 		}
+		return nil
+	})
+	if errors.Is(err, errFoundFile) {
+		return o, nil
+	} else if err != nil {
+		return nil, err
 	}
 	return nil, fs.ErrorObjectNotFound
}
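Editor's note: both lookups above use a sentinel error (`errFoundFile`) to abort the callback-driven listing as soon as a match is found. A hedged, self-contained sketch of the idea with generic names (not rclone APIs):

```go
package main

import (
	"errors"
	"fmt"
)

// errFound aborts iteration early; it signals success, not failure.
var errFound = errors.New("found")

// forEach calls fn for every item and stops on the first non-nil error.
func forEach(items []string, fn func(string) error) error {
	for _, it := range items {
		if err := fn(it); err != nil {
			return err
		}
	}
	return nil
}

// find returns the first item equal to want, or ("", false).
func find(items []string, want string) (string, bool) {
	var got string
	err := forEach(items, func(it string) error {
		if it == want {
			got = it
			return errFound // stop the iteration, keep the result
		}
		return nil
	})
	if errors.Is(err, errFound) {
		return got, true
	}
	return "", false
}

func main() {
	fmt.Println(find([]string{"a", "b", "c"}, "b")) // b true
}
```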
@@ -980,11 +992,37 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
 // This should return ErrDirNotFound if the directory isn't
 // found.
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	return list.WithListP(ctx, dir, f)
+}
+
+// ListP lists the objects and directories of the Fs starting
+// from dir non recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+	list := list.NewHelper(callback)
 	if f.opt.SharedFiles {
-		return f.listReceivedFiles(ctx)
+		err := f.listReceivedFiles(ctx, list.Add)
+		if err != nil {
+			return err
+		}
+		return list.Flush()
 	}
 	if f.opt.SharedFolders {
-		return f.listSharedFolders(ctx)
+		err := f.listSharedFolders(ctx, list.Add)
+		if err != nil {
+			return err
+		}
+		return list.Flush()
 	}
 
 	root := f.slashRoot

@@ -1014,7 +1052,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 				err = fs.ErrorDirNotFound
 			}
 		}
-		return nil, err
+		return err
 	}
 	started = true
 } else {

@@ -1026,7 +1064,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			return shouldRetry(ctx, err)
 		})
 		if err != nil {
-			return nil, fmt.Errorf("list continue: %w", err)
+			return fmt.Errorf("list continue: %w", err)
 		}
 	}
 	for _, entry := range res.Entries {

@@ -1051,14 +1089,20 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		remote := path.Join(dir, leaf)
 		if folderInfo != nil {
 			d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
-			entries = append(entries, d)
+			err = list.Add(d)
+			if err != nil {
+				return err
+			}
 		} else if fileInfo != nil {
 			o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
 			if err != nil {
-				return nil, err
+				return err
 			}
 			if o.(*Object).exportType.listable() {
-				entries = append(entries, o)
+				err = list.Add(o)
+				if err != nil {
+					return err
+				}
 			}
 		}
 	}

@@ -1066,7 +1110,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		break
 	}
 }
-	return entries, nil
+	return list.Flush()
 }
 
 // Put the object
@@ -1286,6 +1330,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
 		result, err = f.srv.MoveV2(&arg)
+		switch e := err.(type) {
+		case files.MoveV2APIError:
+			// There seems to be a bit of eventual consistency here which causes this to
+			// fail on just created objects
+			// See: https://github.com/rclone/rclone/issues/8881
+			if e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
+				fs.Debugf(srcObj, "Retrying move on %v error", err)
+				return true, err
+			}
+		}
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
@@ -2087,6 +2141,7 @@ var (
 	_ fs.Mover        = (*Fs)(nil)
 	_ fs.PublicLinker = (*Fs)(nil)
 	_ fs.DirMover     = (*Fs)(nil)
+	_ fs.ListPer      = (*Fs)(nil)
 	_ fs.Abouter      = (*Fs)(nil)
 	_ fs.Shutdowner   = &Fs{}
 	_ fs.Object       = (*Object)(nil)
@@ -456,9 +456,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 		}
 	}()
 	baseDialer := fshttp.NewDialer(ctx)
-	if f.opt.SocksProxy != "" {
-		conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
-	} else if f.proxyURL != nil {
+	if f.opt.SocksProxy != "" || f.proxyURL != nil {
 		// We need to make the onward connection to f.opt.Host. However the FTP
 		// library sets the host to the proxy IP after using EPSV or PASV so we need
 		// to correct that here.

@@ -468,7 +466,11 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
 			return nil, err
 		}
 		dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
-		conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
+		if f.opt.SocksProxy != "" {
+			conn, err = proxy.SOCKS5Dial(network, dialAddress, f.opt.SocksProxy, baseDialer)
+		} else {
+			conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
+		}
 	} else {
 		conn, err = baseDialer.Dial(network, address)
 	}
@@ -1290,7 +1292,7 @@ func (f *ftpReadCloser) Close() error {
 	// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
 	if errX := textprotoError(err); errX != nil {
 		switch errX.Code {
-		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
+		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend, ftp.StatusRequestedFileActionOK:
 			err = nil
 		}
 	}
@@ -1134,7 +1134,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		remote: remote,
 	}
 
-	rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
+	// Set the storage class for the destination object if configured
+	var dstObject *storage.Object
+	if f.opt.StorageClass != "" {
+		dstObject = &storage.Object{
+			StorageClass: f.opt.StorageClass,
+		}
+	}
+
+	rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, dstObject)
 	if !f.opt.BucketPolicyOnly {
 		rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
 	}

@@ -1422,6 +1430,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		ContentType: fs.MimeType(ctx, src),
 		Metadata:    metadataFromModTime(modTime),
 	}
+	// Set the storage class from config if configured
+	if o.fs.opt.StorageClass != "" {
+		object.StorageClass = o.fs.opt.StorageClass
+	}
 	// Apply upload options
 	for _, option := range options {
 		key, value := option.Header()
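Editor's note: with these two hunks, both server-side copies and uploads should honour the configured storage class. A hedged usage example (the flag name follows rclone's usual backend-flag naming for the gcs `storage_class` option; the paths are placeholders):

```console
# Server-side copy whose destination object is written as NEARLINE
rclone copyto --gcs-storage-class NEARLINE remote:bucket/src.bin remote:bucket/dst.bin
```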
@@ -43,33 +43,42 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 
 var commandHelp = []fs.CommandHelp{{
 	Name:  "drop",
-	Short: "Drop cache",
+	Short: "Drop cache.",
 	Long: `Completely drop checksum cache.
-Usage Example:
-    rclone backend drop hasher:
-`,
+
+Usage example:
+
+` + "```console" + `
+rclone backend drop hasher:
+` + "```",
 }, {
 	Name:  "dump",
-	Short: "Dump the database",
-	Long:  "Dump cache records covered by the current remote",
+	Short: "Dump the database.",
+	Long:  "Dump cache records covered by the current remote.",
 }, {
 	Name:  "fulldump",
-	Short: "Full dump of the database",
-	Long:  "Dump all cache records in the database",
+	Short: "Full dump of the database.",
+	Long:  "Dump all cache records in the database.",
 }, {
 	Name:  "import",
-	Short: "Import a SUM file",
+	Short: "Import a SUM file.",
 	Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
-Usage Example:
-    rclone backend import hasher:subdir md5 /path/to/sum.md5
-`,
+
+Usage example:
+
+` + "```console" + `
+rclone backend import hasher:subdir md5 /path/to/sum.md5
+` + "```",
}, {
 	Name:  "stickyimport",
-	Short: "Perform fast import of a SUM file",
+	Short: "Perform fast import of a SUM file.",
 	Long: `Fill hash cache from a SUM file without verifying file fingerprints.
-Usage Example:
-    rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
-`,
+
+Usage example:
+
+` + "```console" + `
+rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
+` + "```",
 }}
 
 func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
@@ -11,6 +11,7 @@ import (
 	"io"
 	"mime"
 	"net/http"
+	"net/textproto"
 	"net/url"
 	"path"
 	"strings"

@@ -37,6 +38,10 @@ func init() {
 		Description: "HTTP",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
+		MetadataInfo: &fs.MetadataInfo{
+			System: systemMetadataInfo,
+			Help:   `HTTP metadata keys are case insensitive and are always returned in lower case.`,
+		},
 		Options: []fs.Option{{
 			Name: "url",
 			Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",

@@ -98,6 +103,40 @@ sizes of any files, and some files that don't exist may be in the listing.`,
 	fs.Register(fsi)
 }
 
+// system metadata keys which this backend owns
+var systemMetadataInfo = map[string]fs.MetadataHelp{
+	"cache-control": {
+		Help:    "Cache-Control header",
+		Type:    "string",
+		Example: "no-cache",
+	},
+	"content-disposition": {
+		Help:    "Content-Disposition header",
+		Type:    "string",
+		Example: "inline",
+	},
+	"content-disposition-filename": {
+		Help:    "Filename retrieved from Content-Disposition header",
+		Type:    "string",
+		Example: "file.txt",
+	},
+	"content-encoding": {
+		Help:    "Content-Encoding header",
+		Type:    "string",
+		Example: "gzip",
+	},
+	"content-language": {
+		Help:    "Content-Language header",
+		Type:    "string",
+		Example: "en-US",
+	},
+	"content-type": {
+		Help:    "Content-Type header",
+		Type:    "string",
+		Example: "text/plain",
+	},
+}
+
 // Options defines the configuration for this backend
 type Options struct {
 	Endpoint string `config:"url"`
@@ -126,6 +165,13 @@ type Object struct {
 	size        int64
 	modTime     time.Time
 	contentType string
+
+	// Metadata as pointers to strings as they often won't be present
+	contentDisposition         *string // Content-Disposition: header
+	contentDispositionFilename *string // Filename retrieved from Content-Disposition: header
+	cacheControl               *string // Cache-Control: header
+	contentEncoding            *string // Content-Encoding: header
+	contentLanguage            *string // Content-Language: header
 }
 
 // statusError returns an error if the res contained an error

@@ -277,6 +323,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		ci:  ci,
 	}
 	f.features = (&fs.Features{
+		ReadMetadata:            true,
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)

@@ -429,6 +476,29 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
 	return names, nil
 }
 
+// parseFilename extracts the filename from a Content-Disposition header
+func parseFilename(contentDisposition string) (string, error) {
+	// Normalize the contentDisposition to canonical MIME format
+	mediaType, params, err := mime.ParseMediaType(contentDisposition)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse contentDisposition: %v", err)
+	}
+
+	// Check if the contentDisposition is an attachment
+	if strings.ToLower(mediaType) != "attachment" {
+		return "", fmt.Errorf("not an attachment: %s", mediaType)
+	}
+
+	// Extract the filename from the parameters
+	filename, ok := params["filename"]
+	if !ok {
+		return "", fmt.Errorf("filename not found in contentDisposition")
+	}
+
+	// Decode filename if it contains special encoding
+	return textproto.TrimString(filename), nil
+}
+
 // Adds the configured headers to the request if any
 func addHeaders(req *http.Request, opt *Options) {
 	for i := 0; i < len(opt.Headers); i += 2 {
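Editor's note: `parseFilename` leans on the standard library for the actual header parsing. A small illustration of what `mime.ParseMediaType` returns for a typical header (standard library behaviour, runnable as-is):

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// ParseMediaType splits the directive from its parameters and
	// un-quotes the values for us.
	mediaType, params, err := mime.ParseMediaType(`attachment; filename="five.txt.gz"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(mediaType)          // attachment
	fmt.Println(params["filename"]) // five.txt.gz
}
```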
@@ -577,6 +647,9 @@ func (o *Object) String() string {
 
 // Remote the name of the remote HTTP file, relative to the fs root
 func (o *Object) Remote() string {
+	if o.contentDispositionFilename != nil {
+		return *o.contentDispositionFilename
+	}
 	return o.remote
 }

@@ -634,6 +707,29 @@ func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
 	o.modTime = t
 	o.contentType = res.Header.Get("Content-Type")
 	o.size = rest.ParseSizeFromHeaders(res.Header)
+	contentDisposition := res.Header.Get("Content-Disposition")
+	if contentDisposition != "" {
+		o.contentDisposition = &contentDisposition
+	}
+	if o.contentDisposition != nil {
+		var filename string
+		filename, err = parseFilename(*o.contentDisposition)
+		if err == nil && filename != "" {
+			o.contentDispositionFilename = &filename
+		}
+	}
+	cacheControl := res.Header.Get("Cache-Control")
+	if cacheControl != "" {
+		o.cacheControl = &cacheControl
+	}
+	contentEncoding := res.Header.Get("Content-Encoding")
+	if contentEncoding != "" {
+		o.contentEncoding = &contentEncoding
+	}
+	contentLanguage := res.Header.Get("Content-Language")
+	if contentLanguage != "" {
+		o.contentLanguage = &contentLanguage
+	}
 
 	// If NoSlash is set then check ContentType to see if it is a directory
 	if o.fs.opt.NoSlash {
@@ -722,11 +818,13 @@ var commandHelp = []fs.CommandHelp{{
 	Long: `This set command can be used to update the config parameters
 for a running http backend.
 
-Usage Examples:
+Usage examples:
 
-    rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-    rclone rc backend/command command=set fs=remote: -o url=https://example.com
+` + "```console" + `
+rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=remote: -o url=https://example.com
+` + "```" + `
 
 The option keys are named as they are in the config file.
 
@@ -734,8 +832,7 @@ This rebuilds the connection to the http backend when it is called with
 the new parameters. Only new parameters need be passed as the values
 will default to those currently in use.
 
-It doesn't return anything.
-`,
+It doesn't return anything.`,
 }}
 
 // Command the backend to run a named command

@@ -771,6 +868,30 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 	}
 }
 
+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
+	metadata = make(fs.Metadata, 6)
+	if o.contentType != "" {
+		metadata["content-type"] = o.contentType
+	}
+
+	// Set system metadata
+	setMetadata := func(k string, v *string) {
+		if v == nil || *v == "" {
+			return
+		}
+		metadata[k] = *v
+	}
+	setMetadata("content-disposition", o.contentDisposition)
+	setMetadata("content-disposition-filename", o.contentDispositionFilename)
+	setMetadata("cache-control", o.cacheControl)
+	setMetadata("content-language", o.contentLanguage)
+	setMetadata("content-encoding", o.contentEncoding)
+	return metadata, nil
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = &Fs{}
@@ -778,4 +899,5 @@ var (
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.Metadataer = &Object{}
|
||||
)
|
||||
|
||||
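Callers discover this new capability through the optional `fs.Metadataer` interface. A hedged sketch of a consumer follows; the helper name `contentDispositionName` is hypothetical, while the `"content-disposition-filename"` key matches what the code above sets:

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// contentDispositionName is a hypothetical helper: it returns the filename
// advertised via Content-Disposition if the object's backend exposes
// metadata, and reports false otherwise.
func contentDispositionName(ctx context.Context, o fs.Object) (string, bool) {
	do, ok := o.(fs.Metadataer)
	if !ok {
		return "", false // backend has no metadata support
	}
	meta, err := do.Metadata(ctx)
	if err != nil {
		return "", false
	}
	name, ok := meta["content-disposition-filename"]
	return name, ok
}
```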
@@ -60,6 +60,17 @@ func prepareServer(t *testing.T) configmap.Simple {
        what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
        assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
        assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])

        // Set the content disposition header for the fifth file
        // later we will check if it is set using the metadata method
        if r.URL.Path == "/five.txt.gz" {
            w.Header().Set("Content-Disposition", "attachment; filename=\"five.txt.gz\"")
            w.Header().Set("Content-Type", "text/plain; charset=utf-8")
            w.Header().Set("Cache-Control", "no-cache")
            w.Header().Set("Content-Language", "en-US")
            w.Header().Set("Content-Encoding", "gzip")
        }

        fileServer.ServeHTTP(w, r)
    })

@@ -102,27 +113,33 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {

    sort.Sort(entries)

    require.Equal(t, 4, len(entries))
    require.Equal(t, 5, len(entries))

    e := entries[0]
    assert.Equal(t, "four", e.Remote())
    assert.Equal(t, "five.txt.gz", e.Remote())
    assert.Equal(t, int64(-1), e.Size())
    _, ok := e.(fs.Directory)
    _, ok := e.(fs.Object)
    assert.True(t, ok)

    e = entries[1]
    assert.Equal(t, "four", e.Remote())
    assert.Equal(t, int64(-1), e.Size())
    _, ok = e.(fs.Directory)
    assert.True(t, ok)

    e = entries[2]
    assert.Equal(t, "one%.txt", e.Remote())
    assert.Equal(t, int64(5+lineEndSize), e.Size())
    _, ok = e.(*Object)
    assert.True(t, ok)

    e = entries[2]
    e = entries[3]
    assert.Equal(t, "three", e.Remote())
    assert.Equal(t, int64(-1), e.Size())
    _, ok = e.(fs.Directory)
    assert.True(t, ok)

    e = entries[3]
    e = entries[4]
    assert.Equal(t, "two.html", e.Remote())
    if noSlash {
        assert.Equal(t, int64(-1), e.Size())
@@ -218,6 +235,23 @@ func TestNewObjectWithLeadingSlash(t *testing.T) {
    assert.Equal(t, fs.ErrorObjectNotFound, err)
}

func TestNewObjectWithMetadata(t *testing.T) {
    f := prepare(t)
    o, err := f.NewObject(context.Background(), "/five.txt.gz")
    require.NoError(t, err)
    assert.Equal(t, "five.txt.gz", o.Remote())
    ho, ok := o.(*Object)
    assert.True(t, ok)
    metadata, err := ho.Metadata(context.Background())
    require.NoError(t, err)
    assert.Equal(t, "text/plain; charset=utf-8", metadata["content-type"])
    assert.Equal(t, "attachment; filename=\"five.txt.gz\"", metadata["content-disposition"])
    assert.Equal(t, "five.txt.gz", metadata["content-disposition-filename"])
    assert.Equal(t, "no-cache", metadata["cache-control"])
    assert.Equal(t, "en-US", metadata["content-language"])
    assert.Equal(t, "gzip", metadata["content-encoding"])
}

func TestOpen(t *testing.T) {
    m := prepareServer(t)

BIN backend/http/test/files/five.txt.gz Normal file
Binary file not shown.
@@ -497,9 +497,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
    }

    f.dirCache.FlushDir(dir)
    if err != nil {
        return err
    }
    return nil
}

@@ -115,6 +115,17 @@ points, as you explicitly acknowledge that they should be skipped.`,
    NoPrefix: true,
    Advanced: true,
}, {
    Name: "skip_specials",
    Help: `Don't warn about skipped pipes, sockets and device objects.

This flag disables warning messages on skipped pipes, sockets and
device objects, as you explicitly acknowledge that they should be
skipped.`,
    Default:  false,
    NoPrefix: true,
    Advanced: true,
}, {
    Name: "zero_size_links",
    Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
@@ -328,6 +339,7 @@ type Options struct {
    FollowSymlinks    bool `config:"copy_links"`
    TranslateSymlinks bool `config:"links"`
    SkipSymlinks      bool `config:"skip_links"`
    SkipSpecials      bool `config:"skip_specials"`
    UTFNorm           bool `config:"unicode_normalization"`
    NoCheckUpdated    bool `config:"no_check_updated"`
    NoUNC             bool `config:"nounc"`
@@ -1058,12 +1070,11 @@ func (f *Fs) Hashes() hash.Set {
var commandHelp = []fs.CommandHelp{
    {
        Name:  "noop",
        Short: "A null operation for testing backend commands",
        Long: `This is a test command which has some options
you can try to change the output.`,
        Short: "A null operation for testing backend commands.",
        Long:  `This is a test command which has some options you can try to change the output.`,
        Opts: map[string]string{
            "echo":  "echo the input arguments",
            "error": "return an error based on option value",
            "echo":  "Echo the input arguments.",
            "error": "Return an error based on option value.",
        },
    },
}
@@ -1246,7 +1257,9 @@ func (o *Object) Storable() bool {
        }
        return false
    } else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
        fs.Logf(o, "Can't transfer non file/directory")
        if !o.fs.opt.SkipSpecials {
            fs.Logf(o, "Can't transfer non file/directory")
        }
        return false
    } else if mode&os.ModeDir != 0 {
        // fs.Debugf(o, "Skipping directory")

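The mode test above is plain `os.FileMode` arithmetic; a runnable sketch of the same classification (the path `/dev/null` is just an example):

```go
package main

import (
	"fmt"
	"os"
)

// isSpecial mirrors the check above: pipes, sockets and devices are the
// objects whose skip warnings the new option silences.
func isSpecial(mode os.FileMode) bool {
	return mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0
}

func main() {
	fi, err := os.Stat("/dev/null")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(isSpecial(fi.Mode())) // true on most Unix systems
}
```

Because the option is declared with `NoPrefix: true`, the flag should surface as `--skip-specials` rather than `--local-skip-specials`, matching the existing `--skip-links`.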
@@ -1,4 +1,4 @@
//go:build dragonfly || plan9 || js
//go:build dragonfly || plan9 || js || aix

package local

@@ -18,6 +18,7 @@ Improvements:
import (
    "context"
    "crypto/tls"
    "encoding/base64"
    "errors"
    "fmt"
    "io"
@@ -47,6 +48,9 @@ const (
    maxSleep      = 2 * time.Second
    eventWaitTime = 500 * time.Millisecond
    decayConstant = 2 // bigger for slower decay, exponential

    sessionIDConfigKey = "session_id"
    masterKeyConfigKey = "master_key"
)

var (
@@ -70,6 +74,24 @@ func init() {
    Help:       "Password.",
    Required:   true,
    IsPassword: true,
}, {
    Name:     "2fa",
    Help:     `The 2FA code of your MEGA account if the account is set up with one.`,
    Required: false,
}, {
    Name:      sessionIDConfigKey,
    Help:      "Session (internal use only)",
    Required:  false,
    Advanced:  true,
    Sensitive: true,
    Hide:      fs.OptionHideBoth,
}, {
    Name:      masterKeyConfigKey,
    Help:      "Master key (internal use only)",
    Required:  false,
    Advanced:  true,
    Sensitive: true,
    Hide:      fs.OptionHideBoth,
}, {
    Name: "debug",
    Help: `Output more debug from Mega.
@@ -113,6 +135,9 @@ Enabling it will increase CPU usage and add network overhead.`,
type Options struct {
    User       string `config:"user"`
    Pass       string `config:"pass"`
    TwoFA      string `config:"2fa"`
    SessionID  string `config:"session_id"`
    MasterKey  string `config:"master_key"`
    Debug      bool   `config:"debug"`
    HardDelete bool   `config:"hard_delete"`
    UseHTTPS   bool   `config:"use_https"`
@@ -209,6 +234,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    }
    ci := fs.GetConfig(ctx)

    // Create Fs
    root = parsePath(root)
    f := &Fs{
        name:  name,
        root:  root,
        opt:   *opt,
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.features = (&fs.Features{
        DuplicateFiles:          true,
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f)

    // cache *mega.Mega on username so we can reuse and share
    // them between remotes. They are expensive to make as they
    // contain all the objects and sharing the objects makes the
@@ -248,25 +286,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
        })
    }

    err := srv.Login(opt.User, opt.Pass)
    if err != nil {
        return nil, fmt.Errorf("couldn't login: %w", err)
    if opt.SessionID == "" {
        fs.Debugf(f, "Using username and password to initialize the Mega API")
        err := srv.MultiFactorLogin(opt.User, opt.Pass, opt.TwoFA)
        if err != nil {
            return nil, fmt.Errorf("couldn't login: %w", err)
        }
        megaCache[opt.User] = srv
        m.Set(sessionIDConfigKey, srv.GetSessionID())
        encodedMasterKey := base64.StdEncoding.EncodeToString(srv.GetMasterKey())
        m.Set(masterKeyConfigKey, encodedMasterKey)
    } else {
        fs.Debugf(f, "Using previously stored session ID and master key to initialize the Mega API")
        decodedMasterKey, err := base64.StdEncoding.DecodeString(opt.MasterKey)
        if err != nil {
            return nil, fmt.Errorf("couldn't decode master key: %w", err)
        }
        err = srv.LoginWithKeys(opt.SessionID, decodedMasterKey)
        if err != nil {
            fs.Debugf(f, "login with previous auth keys failed: %v", err)
        }
    }
    megaCache[opt.User] = srv
    }

    root = parsePath(root)
    f := &Fs{
        name:  name,
        root:  root,
        opt:   *opt,
        srv:   srv,
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
    }
    f.features = (&fs.Features{
        DuplicateFiles:          true,
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f)
    f.srv = srv

    // Find the root node and check if it is a file or not
    _, err = f.findRoot(ctx, false)

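The persistence trick here is just a base64 round trip of the binary master key through the config file; a minimal runnable sketch of that round trip (the key bytes below are made up):

```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

func main() {
	// On first login the master key is stored base64-encoded (m.Set above);
	// on later runs it is decoded again and handed to LoginWithKeys.
	key := []byte{0xde, 0xad, 0xbe, 0xef}
	encoded := base64.StdEncoding.EncodeToString(key)
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded, bytes.Equal(key, decoded)) // 3q2+7w== true
}
```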
@@ -87,7 +87,7 @@ Please choose the 'y' option to set your own password then enter your secret.`,

var commandHelp = []fs.CommandHelp{{
    Name:  "du",
    Short: "Return disk usage information for a specified directory",
    Short: "Return disk usage information for a specified directory.",
    Long: `The usage information returned includes the targeted directory as well as all
files stored in any sub-directories that may exist.`,
}, {
@@ -96,7 +96,12 @@ files stored in any sub-directories that may exist.`,
    Long: `The desired path location (including applicable sub-directories) ending in
the object that will be the target of the symlink (for example, /links/mylink).
Include the file extension for the object, if applicable.
` + "`rclone backend symlink <src> <path>`",

Usage example:

` + "```console" + `
rclone backend symlink <src> <path>
` + "```",
    },
}

@@ -1377,9 +1377,27 @@ func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (en
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    list := list.NewHelper(callback)
    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return nil, err
        return err
    }
    err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
        entry, err := f.itemToDirEntry(ctx, dir, info)
@@ -1389,13 +1407,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
        if entry == nil {
            return nil
        }
        entries = append(entries, entry)
        err = list.Add(entry)
        if err != nil {
            return err
        }
        return nil
    })
    if err != nil {
        return nil, err
        return err
    }
    return entries, nil
    return list.Flush()
}

// ListR lists the objects and directories of the Fs starting
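Both backends touched by this change adopt the same adapter: `List` delegates to `ListP` via `list.WithListP`, and `ListP` streams entries through a `list.Helper`. A short sketch of a consumer, assuming an `Fs` that implements `fs.ListPer` (the `countEntries` helper is hypothetical):

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// countEntries consumes a ListP implementation: entries arrive in tranches
// via the callback, and returning an error from it stops the listing.
func countEntries(ctx context.Context, f fs.ListPer, dir string) (int, error) {
	n := 0
	err := f.ListP(ctx, dir, func(entries fs.DirEntries) error {
		n += len(entries)
		return nil
	})
	return n, err
}
```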
@@ -3023,6 +3044,7 @@ var (
    _ fs.PublicLinker = (*Fs)(nil)
    _ fs.CleanUpper   = (*Fs)(nil)
    _ fs.ListRer      = (*Fs)(nil)
    _ fs.ListPer      = (*Fs)(nil)
    _ fs.Shutdowner   = (*Fs)(nil)
    _ fs.Object       = (*Object)(nil)
    _ fs.MimeTyper    = &Object{}

@@ -30,20 +30,25 @@ const (

var commandHelp = []fs.CommandHelp{{
    Name:  operationRename,
    Short: "change the name of an object",
    Short: "change the name of an object.",
    Long: `This command can be used to rename an object.

Usage Examples:
Usage example:

rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
`,
` + "```console" + `
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
` + "```",
    Opts: nil,
}, {
    Name:  operationListMultiPart,
    Short: "List the unfinished multipart uploads",
    Short: "List the unfinished multipart uploads.",
    Long: `This command lists the unfinished multipart uploads in JSON format.

rclone backend list-multipart-uploads oos:bucket/path/to/object
Usage example:

` + "```console" + `
rclone backend list-multipart-uploads oos:bucket/path/to/object
` + "```" + `

It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.
@@ -51,70 +56,82 @@ multipart uploads.
You can call it with no bucket in which case it lists all buckets, with
a bucket or with a bucket and path.

{
  "test-bucket": [
    {
      "namespace": "test-namespace",
      "bucket": "test-bucket",
      "object": "600m.bin",
      "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
      "timeCreated": "2022-07-29T06:21:16.595Z",
      "storageTier": "Standard"
    }
  ]
`,
` + "```json" + `
{
  "test-bucket": [
    {
      "namespace": "test-namespace",
      "bucket": "test-bucket",
      "object": "600m.bin",
      "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
      "timeCreated": "2022-07-29T06:21:16.595Z",
      "storageTier": "Standard"
    }
  ]
}`,
}, {
    Name:  operationCleanup,
    Short: "Remove unfinished multipart uploads.",
    Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Note that you can use --interactive/-i or --dry-run with this command to see
what it would do.

rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
Usage examples:

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
` + "```console" + `
rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
` + "```" + `

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
    Opts: map[string]string{
        "max-age": "Max age of upload to delete",
        "max-age": "Max age of upload to delete.",
    },
}, {
    Name:  operationRestore,
    Short: "Restore objects from Archive to Standard storage",
    Long: `This command can be used to restore one or more objects from Archive to Standard storage.
    Short: "Restore objects from Archive to Standard storage.",
    Long: `This command can be used to restore one or more objects from Archive to
Standard storage.

Usage Examples:
Usage examples:

rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
rclone backend restore oos:bucket -o hours=HOURS
` + "```console" + `
rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
rclone backend restore oos:bucket -o hours=HOURS
` + "```" + `

This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags.

rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
` + "```console" + `
rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
` + "```" + `

All the objects shown will be marked for restore, then
All the objects shown will be marked for restore, then:

rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
` + "```console" + `
rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
` + "```" + `

It returns a list of status dictionaries with Object Name and Status
keys. The Status will be "RESTORED" if it was successful or an error message
if not.
It returns a list of status dictionaries with Object Name and Status keys.
The Status will be "RESTORED" if it was successful or an error message if not.

[
  {
    "Object": "test.txt",
    "Status": "RESTORED"
  },
  {
    "Object": "test/file4.txt",
    "Status": "RESTORED"
  }
]
`,
` + "```json" + `
[
  {
    "Object": "test.txt",
    "Status": "RESTORED"
  },
  {
    "Object": "test/file4.txt",
    "Status": "RESTORED"
  }
]
` + "```",
    Opts: map[string]string{
        "hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
        "hours": `The number of hours for which this object will be restored.
Default is 24 hrs.`,
    },
},
}

@@ -629,11 +629,31 @@ func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callbac
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    list := list.NewHelper(callback)
    err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
        entries = append(entries, o)
        return nil
        return list.Add(o)
    })
    return entries, err
    if err != nil {
        return err
    }
    return list.Flush()
}

// ListR lists the objects and directories of the Fs starting
@@ -1377,6 +1397,8 @@ var (
    _ fs.DirMover        = (*Fs)(nil)
    _ fs.DirCacheFlusher = (*Fs)(nil)
    _ fs.PublicLinker    = (*Fs)(nil)
    _ fs.ListRer         = (*Fs)(nil)
    _ fs.ListPer         = (*Fs)(nil)
    _ fs.Abouter         = (*Fs)(nil)
    _ fs.Shutdowner      = (*Fs)(nil)
    _ fs.Object          = (*Object)(nil)

@@ -75,7 +75,7 @@ func TestLinkValid(t *testing.T) {
        Expire: Time(time.Now().Add(time.Hour)),
    },
    expected: true,
    desc:     "should fallback to Expire field when URL expire parameter is unparseable",
    desc:     "should fallback to Expire field when URL expire parameter is unparsable",
},
{
    name: "invalid when both URL expire and Expire field are expired",

@@ -1678,39 +1678,43 @@ func (f *Fs) decompressDir(ctx context.Context, filename, id, password string, s

var commandHelp = []fs.CommandHelp{{
    Name:  "addurl",
    Short: "Add offline download task for url",
    Short: "Add offline download task for url.",
    Long: `This command adds an offline download task for a url.

Usage:
Usage example:

rclone backend addurl pikpak:dirpath url
` + "```console" + `
rclone backend addurl pikpak:dirpath url
` + "```" + `

Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
download will fall back to the default 'My Pack' folder.
`,
Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
download will fall back to the default 'My Pack' folder.`,
}, {
    Name:  "decompress",
    Short: "Request decompress of a file/files in a folder",
    Short: "Request decompress of a file/files in a folder.",
    Long: `This command requests decompression of file/files in a folder.

Usage:
Usage examples:

rclone backend decompress pikpak:dirpath {filename} -o password=password
rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
` + "```console" + `
rclone backend decompress pikpak:dirpath {filename} -o password=password
rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
` + "```" + `

An optional argument 'filename' can be specified for a file located in
'pikpak:dirpath'. You may want to pass '-o password=password' for
password-protected files. Also, pass '-o delete-src-file' to delete
An optional argument 'filename' can be specified for a file located in
'pikpak:dirpath'. You may want to pass '-o password=password' for
password-protected files. Also, pass '-o delete-src-file' to delete
source files after decompression finishes.

Result:

{
  "Decompressed": 17,
  "SourceDeleted": 0,
  "Errors": 0
}
`,
` + "```json" + `
{
  "Decompressed": 17,
  "SourceDeleted": 0,
  "Errors": 0
}
` + "```",
}}

// Command the backend to run a named command

217 backend/s3/README.md Normal file
@@ -0,0 +1,217 @@
# Adding a new s3 provider

It is quite easy to add a new S3 provider to rclone.

The process is as follows: Create yaml -> add docs -> run tests ->
adjust yaml until tests pass.

All tags can be found in the Provider struct in `backend/s3/providers.go`.
Looking through a few of the yaml files as examples should make things
clear; `AWS.yaml` has the most config and is a good source for copying
and pasting.

You'll then need to add the following (optional tags are in [] and
do not get displayed in rclone config if empty):

## YAML

In `backend/s3/provider/YourProvider.yaml`:

- name
- description
  - More like the full name, often "YourProvider + Object Storage"
- [Region]
  - Any regions your provider supports or the defaults (use `region: {}` for this)
  - Example from AWS.yaml:

    ```yaml
    region:
      us-east-1: |-
        The default endpoint - a good choice if you are unsure.
        US Region, Northern Virginia, or Pacific Northwest.
        Leave location constraint empty.
    ```

  - The defaults (as seen in Rclone.yaml):

    ```yaml
    region:
      "": |-
        Use this if unsure.
        Will use v4 signatures and an empty region.
      other-v2-signature: |-
        Use this only if v4 signatures don't work.
        E.g. pre Jewel/v10 CEPH.
    ```

- [Endpoint]
  - Any endpoints your provider supports
  - Example from Mega.yaml

    ```yaml
    endpoint:
      s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
    ```

- [Location Constraint]
  - The Location Constraint of your remote, often same as region.
  - Example from AWS.yaml

    ```yaml
    location_constraint:
      "": Empty for US Region, Northern Virginia, or Pacific Northwest
      us-east-2: US East (Ohio) Region
    ```

- [ACL]
  - Identical across *most* providers. Select the default with `acl: {}`
  - Example from AWS.yaml

    ```yaml
    acl:
      private: |-
        Owner gets FULL_CONTROL.
        No one else has access rights (default).
      public-read: |-
        Owner gets FULL_CONTROL.
        The AllUsers group gets READ access.
      public-read-write: |-
        Owner gets FULL_CONTROL.
        The AllUsers group gets READ and WRITE access.
        Granting this on a bucket is generally not recommended.
      authenticated-read: |-
        Owner gets FULL_CONTROL.
        The AuthenticatedUsers group gets READ access.
      bucket-owner-read: |-
        Object owner gets FULL_CONTROL.
        Bucket owner gets READ access.
        If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
      bucket-owner-full-control: |-
        Both the object owner and the bucket owner get FULL_CONTROL over the object.
        If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
    ```

- [Storage Class]
  - Identical across *most* providers.
  - Defaults from AWS.yaml

    ```yaml
    storage_class:
      "": Default
      STANDARD: Standard storage class
      REDUCED_REDUNDANCY: Reduced redundancy storage class
      STANDARD_IA: Standard Infrequent Access storage class
      ONEZONE_IA: One Zone Infrequent Access storage class
      GLACIER: Glacier Flexible Retrieval storage class
      DEEP_ARCHIVE: Glacier Deep Archive storage class
      INTELLIGENT_TIERING: Intelligent-Tiering storage class
      GLACIER_IR: Glacier Instant Retrieval storage class
    ```

- [Server Side Encryption]
  - Not common, identical across *most* providers.
  - Defaults from AWS.yaml

    ```yaml
    server_side_encryption:
      "": None
      AES256: AES256
      aws:kms: aws:kms
    ```

- [Advanced Options]
  - All advanced options are Boolean - if true the configurator asks about that
    value, if not it doesn't:

    ```go
    BucketACL            bool `yaml:"bucket_acl,omitempty"`
    DirectoryBucket      bool `yaml:"directory_bucket,omitempty"`
    LeavePartsOnError    bool `yaml:"leave_parts_on_error,omitempty"`
    RequesterPays        bool `yaml:"requester_pays,omitempty"`
    SSECustomerAlgorithm bool `yaml:"sse_customer_algorithm,omitempty"`
    SSECustomerKey       bool `yaml:"sse_customer_key,omitempty"`
    SSECustomerKeyBase64 bool `yaml:"sse_customer_key_base64,omitempty"`
    SSECustomerKeyMd5    bool `yaml:"sse_customer_key_md5,omitempty"`
    SSEKmsKeyID          bool `yaml:"sse_kms_key_id,omitempty"`
    STSEndpoint          bool `yaml:"sts_endpoint,omitempty"`
    UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`
    ```

  - Example from AWS.yaml:

    ```yaml
    bucket_acl: true
    directory_bucket: true
    leave_parts_on_error: true
    requester_pays: true
    sse_customer_algorithm: true
    sse_customer_key: true
    sse_customer_key_base64: true
    sse_customer_key_md5: true
    sse_kms_key_id: true
    sts_endpoint: true
    use_accelerate_endpoint: true
    ```

- Quirks
  - Quirks are discovered through documentation and running the tests as seen below.
  - Most quirks are `*bool` so as to have 3 values: `true`, `false` and `don't care`.

    ```go
    type Quirks struct {
        ListVersion           *int   `yaml:"list_version,omitempty"` // 1 or 2
        ForcePathStyle        *bool  `yaml:"force_path_style,omitempty"` // true = path-style
        ListURLEncode         *bool  `yaml:"list_url_encode,omitempty"`
        UseMultipartEtag      *bool  `yaml:"use_multipart_etag,omitempty"`
        UseAlreadyExists      *bool  `yaml:"use_already_exists,omitempty"`
        UseAcceptEncodingGzip *bool  `yaml:"use_accept_encoding_gzip,omitempty"`
        MightGzip             *bool  `yaml:"might_gzip,omitempty"`
        UseMultipartUploads   *bool  `yaml:"use_multipart_uploads,omitempty"`
        UseUnsignedPayload    *bool  `yaml:"use_unsigned_payload,omitempty"`
        UseXID                *bool  `yaml:"use_x_id,omitempty"`
        SignAcceptEncoding    *bool  `yaml:"sign_accept_encoding,omitempty"`
        CopyCutoff            *int64 `yaml:"copy_cutoff,omitempty"`
        MaxUploadParts        *int   `yaml:"max_upload_parts,omitempty"`
        MinChunkSize          *int64 `yaml:"min_chunk_size,omitempty"`
    }
    ```

  - Example from AWS.yaml

    ```yaml
    quirks:
      might_gzip: false # Never auto gzips objects
      use_unsigned_payload: false # AWS has trailer support
    ```

Note that if you omit a section, e.g. `region`, then the user won't be
asked that question, and if you add an empty section, e.g. `region: {}`,
then the defaults from `Other.yaml` will be used.

## DOCS

- `docs/content/s3.md`
  - Add the provider at the top of the page.
  - Add a section about the provider linked from there.
    - Make sure this is in alphabetical order in the `Providers` section.
  - Add a transcript of a trial `rclone config` session
    - Edit the transcript to remove things which might change in subsequent versions
  - **Do not** alter or add to the autogenerated parts of `s3.md`
    - Rule of thumb: don't edit anything not mentioned above.
  - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
    - This will make autogenerated changes!
- `README.md` - this is the home page on GitHub
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`
- Once you've written the docs, run `make serve` and check they look OK
  in the web browser and the links (internal and external) all work.

## TESTS

Once you've written the code, test that `rclone config` works to your
satisfaction and looks correct, then check the integration tests pass with
`go test -v -remote NewS3Provider:`. You may need to adjust the quirks
to get them to pass. Some providers just can't pass the tests with
control characters in the names, so if these fail and the provider
doesn't support `urlEncodeListings` in the quirks then ignore them.
140 backend/s3/provider/AWS.yaml Normal file
@@ -0,0 +1,140 @@
name: AWS
description: Amazon Web Services (AWS) S3
region:
  us-east-1: |-
    The default endpoint - a good choice if you are unsure.
    US Region, Northern Virginia, or Pacific Northwest.
    Leave location constraint empty.
  us-east-2: |-
    US East (Ohio) Region.
    Needs location constraint us-east-2.
  us-west-1: |-
    US West (Northern California) Region.
    Needs location constraint us-west-1.
  us-west-2: |-
    US West (Oregon) Region.
    Needs location constraint us-west-2.
  ca-central-1: |-
    Canada (Central) Region.
    Needs location constraint ca-central-1.
  eu-west-1: |-
    EU (Ireland) Region.
    Needs location constraint EU or eu-west-1.
  eu-west-2: |-
    EU (London) Region.
    Needs location constraint eu-west-2.
  eu-west-3: |-
    EU (Paris) Region.
    Needs location constraint eu-west-3.
  eu-north-1: |-
    EU (Stockholm) Region.
    Needs location constraint eu-north-1.
  eu-south-1: |-
    EU (Milan) Region.
    Needs location constraint eu-south-1.
  eu-central-1: |-
    EU (Frankfurt) Region.
    Needs location constraint eu-central-1.
  ap-southeast-1: |-
    Asia Pacific (Singapore) Region.
    Needs location constraint ap-southeast-1.
  ap-southeast-2: |-
    Asia Pacific (Sydney) Region.
    Needs location constraint ap-southeast-2.
  ap-northeast-1: |-
    Asia Pacific (Tokyo) Region.
    Needs location constraint ap-northeast-1.
  ap-northeast-2: |-
    Asia Pacific (Seoul).
    Needs location constraint ap-northeast-2.
  ap-northeast-3: |-
    Asia Pacific (Osaka-Local).
    Needs location constraint ap-northeast-3.
  ap-south-1: |-
    Asia Pacific (Mumbai).
    Needs location constraint ap-south-1.
  ap-east-1: |-
    Asia Pacific (Hong Kong) Region.
    Needs location constraint ap-east-1.
  sa-east-1: |-
    South America (Sao Paulo) Region.
    Needs location constraint sa-east-1.
  il-central-1: |-
    Israel (Tel Aviv) Region.
    Needs location constraint il-central-1.
  me-south-1: |-
    Middle East (Bahrain) Region.
    Needs location constraint me-south-1.
  af-south-1: |-
    Africa (Cape Town) Region.
    Needs location constraint af-south-1.
  cn-north-1: |-
    China (Beijing) Region.
    Needs location constraint cn-north-1.
  cn-northwest-1: |-
    China (Ningxia) Region.
    Needs location constraint cn-northwest-1.
  us-gov-east-1: |-
    AWS GovCloud (US-East) Region.
    Needs location constraint us-gov-east-1.
  us-gov-west-1: |-
    AWS GovCloud (US) Region.
    Needs location constraint us-gov-west-1.
endpoint: {}
location_constraint:
  '': Empty for US Region, Northern Virginia, or Pacific Northwest
  us-east-2: US East (Ohio) Region
  us-west-1: US West (Northern California) Region
  us-west-2: US West (Oregon) Region
  ca-central-1: Canada (Central) Region
  eu-west-1: EU (Ireland) Region
  eu-west-2: EU (London) Region
  eu-west-3: EU (Paris) Region
  eu-north-1: EU (Stockholm) Region
  eu-south-1: EU (Milan) Region
  EU: EU Region
  ap-southeast-1: Asia Pacific (Singapore) Region
  ap-southeast-2: Asia Pacific (Sydney) Region
  ap-northeast-1: Asia Pacific (Tokyo) Region
  ap-northeast-2: Asia Pacific (Seoul) Region
  ap-northeast-3: Asia Pacific (Osaka-Local) Region
  ap-south-1: Asia Pacific (Mumbai) Region
  ap-east-1: Asia Pacific (Hong Kong) Region
  sa-east-1: South America (Sao Paulo) Region
  il-central-1: Israel (Tel Aviv) Region
  me-south-1: Middle East (Bahrain) Region
  af-south-1: Africa (Cape Town) Region
  cn-north-1: China (Beijing) Region
  cn-northwest-1: China (Ningxia) Region
  us-gov-east-1: AWS GovCloud (US-East) Region
  us-gov-west-1: AWS GovCloud (US) Region
acl: {}
storage_class:
  '': Default
  STANDARD: Standard storage class
  REDUCED_REDUNDANCY: Reduced redundancy storage class
  STANDARD_IA: Standard Infrequent Access storage class
  ONEZONE_IA: One Zone Infrequent Access storage class
  GLACIER: Glacier Flexible Retrieval storage class
  DEEP_ARCHIVE: Glacier Deep Archive storage class
  INTELLIGENT_TIERING: Intelligent-Tiering storage class
  GLACIER_IR: Glacier Instant Retrieval storage class
server_side_encryption:
  '': None
  AES256: AES256
  aws:kms: aws:kms
bucket_acl: true
directory_bucket: true
leave_parts_on_error: true
requester_pays: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
sts_endpoint: true
use_accelerate_endpoint: true
quirks:
  might_gzip: false # Never auto gzips objects
  use_unsigned_payload: false # AWS has trailer support which means it adds checksums in the trailer without seeking
  use_data_integrity_protections: true
37 backend/s3/provider/Alibaba.yaml Normal file
@@ -0,0 +1,37 @@
name: Alibaba
description: Alibaba Cloud Object Storage System (OSS) formerly Aliyun
endpoint:
  oss-accelerate.aliyuncs.com: Global Accelerate
  oss-accelerate-overseas.aliyuncs.com: Global Accelerate (outside mainland China)
  oss-cn-hangzhou.aliyuncs.com: East China 1 (Hangzhou)
  oss-cn-shanghai.aliyuncs.com: East China 2 (Shanghai)
  oss-cn-qingdao.aliyuncs.com: North China 1 (Qingdao)
  oss-cn-beijing.aliyuncs.com: North China 2 (Beijing)
  oss-cn-zhangjiakou.aliyuncs.com: North China 3 (Zhangjiakou)
  oss-cn-huhehaote.aliyuncs.com: North China 5 (Hohhot)
  oss-cn-wulanchabu.aliyuncs.com: North China 6 (Ulanqab)
  oss-cn-shenzhen.aliyuncs.com: South China 1 (Shenzhen)
  oss-cn-heyuan.aliyuncs.com: South China 2 (Heyuan)
  oss-cn-guangzhou.aliyuncs.com: South China 3 (Guangzhou)
  oss-cn-chengdu.aliyuncs.com: West China 1 (Chengdu)
  oss-cn-hongkong.aliyuncs.com: Hong Kong (Hong Kong)
  oss-us-west-1.aliyuncs.com: US West 1 (Silicon Valley)
  oss-us-east-1.aliyuncs.com: US East 1 (Virginia)
  oss-ap-southeast-1.aliyuncs.com: Southeast Asia Southeast 1 (Singapore)
  oss-ap-southeast-2.aliyuncs.com: Asia Pacific Southeast 2 (Sydney)
  oss-ap-southeast-3.aliyuncs.com: Southeast Asia Southeast 3 (Kuala Lumpur)
  oss-ap-southeast-5.aliyuncs.com: Asia Pacific Southeast 5 (Jakarta)
  oss-ap-northeast-1.aliyuncs.com: Asia Pacific Northeast 1 (Japan)
  oss-ap-south-1.aliyuncs.com: Asia Pacific South 1 (Mumbai)
  oss-eu-central-1.aliyuncs.com: Central Europe 1 (Frankfurt)
  oss-eu-west-1.aliyuncs.com: West Europe (London)
  oss-me-east-1.aliyuncs.com: Middle East 1 (Dubai)
acl: {}
storage_class:
  '': Default
  STANDARD: Standard storage class
  GLACIER: Archive storage mode
  STANDARD_IA: Infrequent access storage mode
bucket_acl: true
quirks:
  use_multipart_etag: false # multipart ETags differ from AWS
19 backend/s3/provider/ArvanCloud.yaml Normal file
@@ -0,0 +1,19 @@
name: ArvanCloud
description: Arvan Cloud Object Storage (AOS)
endpoint:
  s3.ir-thr-at1.arvanstorage.ir: |-
    The default endpoint - a good choice if you are unsure.
    Tehran Iran (Simin)
  s3.ir-tbz-sh1.arvanstorage.ir: Tabriz Iran (Shahriar)
location_constraint:
  ir-thr-at1: Tehran Iran (Simin)
  ir-tbz-sh1: Tabriz Iran (Shahriar)
acl: {}
storage_class:
  STANDARD: Standard storage class
bucket_acl: true
quirks:
  list_version: 1
  force_path_style: true
  list_url_encode: false
  use_already_exists: false
20 backend/s3/provider/Ceph.yaml Normal file
@@ -0,0 +1,20 @@
name: Ceph
description: Ceph Object Storage
region: {}
endpoint: {}
location_constraint: {}
acl: {}
server_side_encryption:
  '': None
  AES256: AES256
  aws:kms: aws:kms
bucket_acl: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
quirks:
  list_version: 1
  force_path_style: true
  list_url_encode: false
98 backend/s3/provider/ChinaMobile.yaml Normal file
@@ -0,0 +1,98 @@
name: ChinaMobile
description: China Mobile Ecloud Elastic Object Storage (EOS)
endpoint:
  eos-wuxi-1.cmecloud.cn: |-
    The default endpoint - a good choice if you are unsure.
    East China (Suzhou)
  eos-jinan-1.cmecloud.cn: East China (Jinan)
  eos-ningbo-1.cmecloud.cn: East China (Hangzhou)
  eos-shanghai-1.cmecloud.cn: East China (Shanghai-1)
  eos-zhengzhou-1.cmecloud.cn: Central China (Zhengzhou)
  eos-hunan-1.cmecloud.cn: Central China (Changsha-1)
  eos-zhuzhou-1.cmecloud.cn: Central China (Changsha-2)
  eos-guangzhou-1.cmecloud.cn: South China (Guangzhou-2)
  eos-dongguan-1.cmecloud.cn: South China (Guangzhou-3)
  eos-beijing-1.cmecloud.cn: North China (Beijing-1)
  eos-beijing-2.cmecloud.cn: North China (Beijing-2)
  eos-beijing-4.cmecloud.cn: North China (Beijing-3)
  eos-huhehaote-1.cmecloud.cn: North China (Huhehaote)
  eos-chengdu-1.cmecloud.cn: Southwest China (Chengdu)
  eos-chongqing-1.cmecloud.cn: Southwest China (Chongqing)
  eos-guiyang-1.cmecloud.cn: Southwest China (Guiyang)
  eos-xian-1.cmecloud.cn: Northwest China (Xian)
  eos-yunnan.cmecloud.cn: Yunnan China (Kunming)
  eos-yunnan-2.cmecloud.cn: Yunnan China (Kunming-2)
  eos-tianjin-1.cmecloud.cn: Tianjin China (Tianjin)
  eos-jilin-1.cmecloud.cn: Jilin China (Changchun)
  eos-hubei-1.cmecloud.cn: Hubei China (Xiangyan)
  eos-jiangxi-1.cmecloud.cn: Jiangxi China (Nanchang)
  eos-gansu-1.cmecloud.cn: Gansu China (Lanzhou)
  eos-shanxi-1.cmecloud.cn: Shanxi China (Taiyuan)
  eos-liaoning-1.cmecloud.cn: Liaoning China (Shenyang)
  eos-hebei-1.cmecloud.cn: Hebei China (Shijiazhuang)
  eos-fujian-1.cmecloud.cn: Fujian China (Xiamen)
  eos-guangxi-1.cmecloud.cn: Guangxi China (Nanning)
  eos-anhui-1.cmecloud.cn: Anhui China (Huainan)
location_constraint:
  wuxi1: East China (Suzhou)
  jinan1: East China (Jinan)
  ningbo1: East China (Hangzhou)
  shanghai1: East China (Shanghai-1)
  zhengzhou1: Central China (Zhengzhou)
  hunan1: Central China (Changsha-1)
  zhuzhou1: Central China (Changsha-2)
  guangzhou1: South China (Guangzhou-2)
  dongguan1: South China (Guangzhou-3)
  beijing1: North China (Beijing-1)
  beijing2: North China (Beijing-2)
  beijing4: North China (Beijing-3)
  huhehaote1: North China (Huhehaote)
  chengdu1: Southwest China (Chengdu)
  chongqing1: Southwest China (Chongqing)
  guiyang1: Southwest China (Guiyang)
  xian1: Northwest China (Xian)
  yunnan: Yunnan China (Kunming)
  yunnan2: Yunnan China (Kunming-2)
  tianjin1: Tianjin China (Tianjin)
  jilin1: Jilin China (Changchun)
  hubei1: Hubei China (Xiangyan)
  jiangxi1: Jiangxi China (Nanchang)
  gansu1: Gansu China (Lanzhou)
  shanxi1: Shanxi China (Taiyuan)
  liaoning1: Liaoning China (Shenyang)
  hebei1: Hebei China (Shijiazhuang)
  fujian1: Fujian China (Xiamen)
  guangxi1: Guangxi China (Nanning)
  anhui1: Anhui China (Huainan)
acl:
  private: |-
    Owner gets FULL_CONTROL.
    No one else has access rights (default).
  public-read: |-
    Owner gets FULL_CONTROL.
    The AllUsers group gets READ access.
  public-read-write: |-
    Owner gets FULL_CONTROL.
    The AllUsers group gets READ and WRITE access.
    Granting this on a bucket is generally not recommended.
  authenticated-read: |-
    Owner gets FULL_CONTROL.
    The AuthenticatedUsers group gets READ access.
storage_class:
  '': Default
  STANDARD: Standard storage class
  GLACIER: Archive storage mode
  STANDARD_IA: Infrequent access storage mode
server_side_encryption:
  '': None
  AES256: AES256
bucket_acl: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
quirks:
  list_version: 1
  force_path_style: true
  list_url_encode: false
  use_already_exists: false
8 backend/s3/provider/Cloudflare.yaml Normal file
@@ -0,0 +1,8 @@
name: Cloudflare
description: Cloudflare R2 Storage
region:
  auto: R2 buckets are automatically distributed across Cloudflare's data centers for low latency.
endpoint: {}
quirks:
  force_path_style: true
  use_multipart_etag: false # multipart ETags are random
10 backend/s3/provider/Cubbit.yaml Normal file
@@ -0,0 +1,10 @@
name: Cubbit
description: Cubbit DS3 Object Storage
region:
  eu-west-1: Europe West
endpoint:
  s3.cubbit.eu: Cubbit DS3 Object Storage endpoint
acl: {}
bucket_acl: true
quirks:
  use_multipart_etag: false
20 backend/s3/provider/DigitalOcean.yaml Normal file
@@ -0,0 +1,20 @@
name: DigitalOcean
description: DigitalOcean Spaces
region: {}
endpoint:
  syd1.digitaloceanspaces.com: DigitalOcean Spaces Sydney 1
  sfo3.digitaloceanspaces.com: DigitalOcean Spaces San Francisco 3
  sfo2.digitaloceanspaces.com: DigitalOcean Spaces San Francisco 2
  fra1.digitaloceanspaces.com: DigitalOcean Spaces Frankfurt 1
  nyc3.digitaloceanspaces.com: DigitalOcean Spaces New York 3
  ams3.digitaloceanspaces.com: DigitalOcean Spaces Amsterdam 3
  sgp1.digitaloceanspaces.com: DigitalOcean Spaces Singapore 1
  lon1.digitaloceanspaces.com: DigitalOcean Spaces London 1
  tor1.digitaloceanspaces.com: DigitalOcean Spaces Toronto 1
  blr1.digitaloceanspaces.com: DigitalOcean Spaces Bangalore 1
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  list_url_encode: false
  use_already_exists: false
11 backend/s3/provider/Dreamhost.yaml Normal file
@@ -0,0 +1,11 @@
name: Dreamhost
description: Dreamhost DreamObjects
region: {}
endpoint:
  objects-us-east-1.dream.io: Dream Objects endpoint
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  list_url_encode: false
  use_already_exists: false
9 backend/s3/provider/Exaba.yaml Normal file
@@ -0,0 +1,9 @@
name: Exaba
description: Exaba Object Storage
region: {}
endpoint: {}
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  force_path_style: true
21 backend/s3/provider/FileLu.yaml Normal file
@@ -0,0 +1,21 @@
name: FileLu
description: FileLu S5 (S3-Compatible Object Storage)
region:
  global: Global
  us-east: North America (US-East)
  eu-central: Europe (EU-Central)
  ap-southeast: Asia Pacific (AP-Southeast)
  me-central: Middle East (ME-Central)
endpoint:
  s5lu.com: Global FileLu S5 endpoint
  us.s5lu.com: North America (US-East) region endpoint
  eu.s5lu.com: Europe (EU-Central) region endpoint
  ap.s5lu.com: Asia Pacific (AP-Southeast) region endpoint
  me.s5lu.com: Middle East (ME-Central) region endpoint
acl: {}
bucket_acl: true
quirks:
  list_version: 1
  force_path_style: true
  list_url_encode: false
  use_multipart_etag: false
6 backend/s3/provider/FlashBlade.yaml Normal file
@@ -0,0 +1,6 @@
name: FlashBlade
description: Pure Storage FlashBlade Object Storage
endpoint: {}
quirks:
  might_gzip: false # never auto-gzip
  force_path_style: true # supports vhost but defaults to path-style
20 backend/s3/provider/GCS.yaml Normal file
@@ -0,0 +1,20 @@
name: GCS
description: Google Cloud Storage
region: {}
endpoint:
  https://storage.googleapis.com: Google Cloud Storage endpoint
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  # Google breaks the request signature by mutating the Accept-Encoding HTTP header
  # https://github.com/rclone/rclone/issues/6670
  use_accept_encoding_gzip: false
  sign_accept_encoding: false
  use_already_exists: true # returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
  # GCS doesn't like the x-id URL parameter the SDKv2 inserts
  use_x_id: false
  # GCS S3 doesn't support multipart server side copy:
  # See: https://issuetracker.google.com/issues/323465186
  # So make the cutoff very large, which it does seem to support
  copy_cutoff: 9223372036854775807
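For reference, the magic `copy_cutoff` above is simply `math.MaxInt64`, which effectively disables splitting server-side copies; a one-line check:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// The GCS copy_cutoff value equals math.MaxInt64, so rclone never
	// splits a server-side copy into multiple parts on this provider.
	fmt.Println(int64(9223372036854775807) == math.MaxInt64) // true
}
```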
15 backend/s3/provider/Hetzner.yaml Normal file
@@ -0,0 +1,15 @@
name: Hetzner
description: Hetzner Object Storage
region:
  hel1: Helsinki
  fsn1: Falkenstein
  nbg1: Nuremberg
endpoint:
  hel1.your-objectstorage.com: Helsinki
  fsn1.your-objectstorage.com: Falkenstein
  nbg1.your-objectstorage.com: Nuremberg
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  use_already_exists: false
41 backend/s3/provider/HuaweiOBS.yaml Normal file
@@ -0,0 +1,41 @@
name: HuaweiOBS
description: Huawei Object Storage Service
region:
  af-south-1: AF-Johannesburg
  ap-southeast-2: AP-Bangkok
  ap-southeast-3: AP-Singapore
  cn-east-3: CN East-Shanghai1
  cn-east-2: CN East-Shanghai2
  cn-north-1: CN North-Beijing1
  cn-north-4: CN North-Beijing4
  cn-south-1: CN South-Guangzhou
  ap-southeast-1: CN-Hong Kong
  sa-argentina-1: LA-Buenos Aires1
  sa-peru-1: LA-Lima1
  na-mexico-1: LA-Mexico City1
  sa-chile-1: LA-Santiago2
  sa-brazil-1: LA-Sao Paulo1
  ru-northwest-2: RU-Moscow2
endpoint:
  obs.af-south-1.myhuaweicloud.com: AF-Johannesburg
  obs.ap-southeast-2.myhuaweicloud.com: AP-Bangkok
  obs.ap-southeast-3.myhuaweicloud.com: AP-Singapore
  obs.cn-east-3.myhuaweicloud.com: CN East-Shanghai1
  obs.cn-east-2.myhuaweicloud.com: CN East-Shanghai2
  obs.cn-north-1.myhuaweicloud.com: CN North-Beijing1
  obs.cn-north-4.myhuaweicloud.com: CN North-Beijing4
  obs.cn-south-1.myhuaweicloud.com: CN South-Guangzhou
  obs.ap-southeast-1.myhuaweicloud.com: CN-Hong Kong
  obs.sa-argentina-1.myhuaweicloud.com: LA-Buenos Aires1
  obs.sa-peru-1.myhuaweicloud.com: LA-Lima1
  obs.na-mexico-1.myhuaweicloud.com: LA-Mexico City1
  obs.sa-chile-1.myhuaweicloud.com: LA-Santiago2
  obs.sa-brazil-1.myhuaweicloud.com: LA-Sao Paulo1
  obs.ru-northwest-2.myhuaweicloud.com: RU-Moscow2
acl: {}
bucket_acl: true
quirks:
  # Huawei OBS PFS does not support ListObjectsV2, and if URL-encoded listings are turned on the marker does not work and listing loops over the same page forever.
  list_url_encode: false
  list_version: 1
  use_already_exists: false
126 backend/s3/provider/IBMCOS.yaml Normal file
@@ -0,0 +1,126 @@
name: IBMCOS
description: IBM COS S3
region: {}
endpoint:
  s3.us.cloud-object-storage.appdomain.cloud: US Cross Region Endpoint
  s3.dal.us.cloud-object-storage.appdomain.cloud: US Cross Region Dallas Endpoint
  s3.wdc.us.cloud-object-storage.appdomain.cloud: US Cross Region Washington DC Endpoint
  s3.sjc.us.cloud-object-storage.appdomain.cloud: US Cross Region San Jose Endpoint
  s3.private.us.cloud-object-storage.appdomain.cloud: US Cross Region Private Endpoint
  s3.private.dal.us.cloud-object-storage.appdomain.cloud: US Cross Region Dallas Private Endpoint
  s3.private.wdc.us.cloud-object-storage.appdomain.cloud: US Cross Region Washington DC Private Endpoint
  s3.private.sjc.us.cloud-object-storage.appdomain.cloud: US Cross Region San Jose Private Endpoint
  s3.us-east.cloud-object-storage.appdomain.cloud: US Region East Endpoint
  s3.private.us-east.cloud-object-storage.appdomain.cloud: US Region East Private Endpoint
  s3.us-south.cloud-object-storage.appdomain.cloud: US Region South Endpoint
  s3.private.us-south.cloud-object-storage.appdomain.cloud: US Region South Private Endpoint
  s3.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Endpoint
  s3.fra.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Frankfurt Endpoint
  s3.mil.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Milan Endpoint
  s3.ams.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Amsterdam Endpoint
  s3.private.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Private Endpoint
  s3.private.fra.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Frankfurt Private Endpoint
  s3.private.mil.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Milan Private Endpoint
  s3.private.ams.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Amsterdam Private Endpoint
  s3.eu-gb.cloud-object-storage.appdomain.cloud: Great Britain Endpoint
  s3.private.eu-gb.cloud-object-storage.appdomain.cloud: Great Britain Private Endpoint
  s3.eu-de.cloud-object-storage.appdomain.cloud: EU Region DE Endpoint
  s3.private.eu-de.cloud-object-storage.appdomain.cloud: EU Region DE Private Endpoint
  s3.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Endpoint
  s3.tok.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Tokyo Endpoint
  s3.hkg.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Hong Kong Endpoint
  s3.seo.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Seoul Endpoint
  s3.private.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Private Endpoint
  s3.private.tok.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Tokyo Private Endpoint
  s3.private.hkg.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Hong Kong Private Endpoint
  s3.private.seo.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Seoul Private Endpoint
  s3.jp-tok.cloud-object-storage.appdomain.cloud: APAC Region Japan Endpoint
  s3.private.jp-tok.cloud-object-storage.appdomain.cloud: APAC Region Japan Private Endpoint
  s3.au-syd.cloud-object-storage.appdomain.cloud: APAC Region Australia Endpoint
  s3.private.au-syd.cloud-object-storage.appdomain.cloud: APAC Region Australia Private Endpoint
  s3.ams03.cloud-object-storage.appdomain.cloud: Amsterdam Single Site Endpoint
  s3.private.ams03.cloud-object-storage.appdomain.cloud: Amsterdam Single Site Private Endpoint
  s3.che01.cloud-object-storage.appdomain.cloud: Chennai Single Site Endpoint
  s3.private.che01.cloud-object-storage.appdomain.cloud: Chennai Single Site Private Endpoint
  s3.mel01.cloud-object-storage.appdomain.cloud: Melbourne Single Site Endpoint
  s3.private.mel01.cloud-object-storage.appdomain.cloud: Melbourne Single Site Private Endpoint
  s3.osl01.cloud-object-storage.appdomain.cloud: Oslo Single Site Endpoint
  s3.private.osl01.cloud-object-storage.appdomain.cloud: Oslo Single Site Private Endpoint
  s3.tor01.cloud-object-storage.appdomain.cloud: Toronto Single Site Endpoint
  s3.private.tor01.cloud-object-storage.appdomain.cloud: Toronto Single Site Private Endpoint
  s3.seo01.cloud-object-storage.appdomain.cloud: Seoul Single Site Endpoint
  s3.private.seo01.cloud-object-storage.appdomain.cloud: Seoul Single Site Private Endpoint
  s3.mon01.cloud-object-storage.appdomain.cloud: Montreal Single Site Endpoint
  s3.private.mon01.cloud-object-storage.appdomain.cloud: Montreal Single Site Private Endpoint
  s3.mex01.cloud-object-storage.appdomain.cloud: Mexico Single Site Endpoint
  s3.private.mex01.cloud-object-storage.appdomain.cloud: Mexico Single Site Private Endpoint
  s3.sjc04.cloud-object-storage.appdomain.cloud: San Jose Single Site Endpoint
  s3.private.sjc04.cloud-object-storage.appdomain.cloud: San Jose Single Site Private Endpoint
  s3.mil01.cloud-object-storage.appdomain.cloud: Milan Single Site Endpoint
  s3.private.mil01.cloud-object-storage.appdomain.cloud: Milan Single Site Private Endpoint
  s3.hkg02.cloud-object-storage.appdomain.cloud: Hong Kong Single Site Endpoint
  s3.private.hkg02.cloud-object-storage.appdomain.cloud: Hong Kong Single Site Private Endpoint
  s3.par01.cloud-object-storage.appdomain.cloud: Paris Single Site Endpoint
  s3.private.par01.cloud-object-storage.appdomain.cloud: Paris Single Site Private Endpoint
  s3.sng01.cloud-object-storage.appdomain.cloud: Singapore Single Site Endpoint
  s3.private.sng01.cloud-object-storage.appdomain.cloud: Singapore Single Site Private Endpoint
location_constraint:
  us-standard: US Cross Region Standard
  us-vault: US Cross Region Vault
  us-cold: US Cross Region Cold
  us-flex: US Cross Region Flex
  us-east-standard: US East Region Standard
  us-east-vault: US East Region Vault
  us-east-cold: US East Region Cold
  us-east-flex: US East Region Flex
  us-south-standard: US South Region Standard
  us-south-vault: US South Region Vault
  us-south-cold: US South Region Cold
  us-south-flex: US South Region Flex
  eu-standard: EU Cross Region Standard
  eu-vault: EU Cross Region Vault
  eu-cold: EU Cross Region Cold
  eu-flex: EU Cross Region Flex
  eu-gb-standard: Great Britain Standard
  eu-gb-vault: Great Britain Vault
  eu-gb-cold: Great Britain Cold
  eu-gb-flex: Great Britain Flex
  ap-standard: APAC Standard
  ap-vault: APAC Vault
  ap-cold: APAC Cold
  ap-flex: APAC Flex
  mel01-standard: Melbourne Standard
  mel01-vault: Melbourne Vault
  mel01-cold: Melbourne Cold
  mel01-flex: Melbourne Flex
  tor01-standard: Toronto Standard
  tor01-vault: Toronto Vault
  tor01-cold: Toronto Cold
  tor01-flex: Toronto Flex
acl:
  private: |-
    Owner gets FULL_CONTROL.
    No one else has access rights (default).
    This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.
public-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ access.
|
||||
This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS.
|
||||
public-read-write: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ and WRITE access.
|
||||
This acl is available on IBM Cloud (Infra), On-Premise IBM COS.
|
||||
authenticated-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AuthenticatedUsers group gets READ access.
|
||||
Not supported on Buckets.
|
||||
This acl is available on IBM Cloud (Infra) and On-Premise IBM COS.
|
||||
ibm_api_key: true
|
||||
ibm_resource_instance_id: true
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false # returns BucketAlreadyExists
|
||||
7  backend/s3/provider/IDrive.yaml  Normal file
@@ -0,0 +1,7 @@
name: IDrive
description: IDrive e2
acl: {}
bucket_acl: true
quirks:
  force_path_style: true
  use_already_exists: false
17  backend/s3/provider/IONOS.yaml  Normal file
@@ -0,0 +1,17 @@
name: IONOS
description: IONOS Cloud
region:
  de: Frankfurt, Germany
  eu-central-2: Berlin, Germany
  eu-south-2: Logrono, Spain
endpoint:
  s3-eu-central-1.ionoscloud.com: Frankfurt, Germany
  s3-eu-central-2.ionoscloud.com: Berlin, Germany
  s3-eu-south-2.ionoscloud.com: Logrono, Spain
acl: {}
bucket_acl: true
quirks:
  # listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
  force_path_style: true
  list_url_encode: false
  use_already_exists: false
10  backend/s3/provider/Intercolo.yaml  Normal file
@@ -0,0 +1,10 @@
name: Intercolo
description: Intercolo Object Storage
region:
  de-fra: Frankfurt, Germany
endpoint:
  de-fra.i3storage.com: Frankfurt, Germany
acl: {}
bucket_acl: true
quirks:
  use_unsigned_payload: false # has trailer support
11  backend/s3/provider/Leviia.yaml  Normal file
@@ -0,0 +1,11 @@
name: Leviia
description: Leviia Object Storage
region: {}
endpoint:
  s3.leviia.com: |-
    The default endpoint
    Leviia
acl: {}
bucket_acl: true
quirks:
  use_already_exists: false
15  backend/s3/provider/Liara.yaml  Normal file
@@ -0,0 +1,15 @@
name: Liara
description: Liara Object Storage
endpoint:
  storage.iran.liara.space: |-
    The default endpoint
    Iran
acl: {}
storage_class:
  STANDARD: Standard storage class
bucket_acl: true
quirks:
  force_path_style: true
  list_url_encode: false
  use_multipart_etag: false # multipart ETags differ from AWS
  use_already_exists: false
26  backend/s3/provider/Linode.yaml  Normal file
@@ -0,0 +1,26 @@
name: Linode
description: Linode Object Storage
endpoint:
  nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
  us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
  in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
  us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
  eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
  id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
  gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
  us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
  es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
  au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
  us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
  it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
  us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
  jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
  fr-par-1.linodeobjects.com: Paris (France), fr-par-1
  br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
  us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
  ap-south-1.linodeobjects.com: Singapore, ap-south-1
  sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
  se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
  us-iad-1.linodeobjects.com: Washington, DC (USA), us-iad-1
acl: {}
bucket_acl: true
12  backend/s3/provider/LyveCloud.yaml  Normal file
@@ -0,0 +1,12 @@
name: LyveCloud
description: Seagate Lyve Cloud
region: {}
endpoint:
  's3.us-west-1.{account_name}.lyve.seagate.com': US West 1 - California
  's3.eu-west-1.{account_name}.lyve.seagate.com': EU West 1 - Ireland
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  use_multipart_etag: false # multipart ETags differ from AWS
  use_already_exists: false
16  backend/s3/provider/Magalu.yaml  Normal file
@@ -0,0 +1,16 @@
name: Magalu
description: Magalu Object Storage
endpoint:
  br-se1.magaluobjects.com: São Paulo, SP (BR), br-se1
  br-ne1.magaluobjects.com: Fortaleza, CE (BR), br-ne1
acl: {}
storage_class:
  STANDARD: Standard storage class
  GLACIER_IR: Glacier Instant Retrieval storage class
bucket_acl: true
quirks:
  list_version: 1
  force_path_style: true
  list_url_encode: false
  use_multipart_etag: false
  use_already_exists: false
15  backend/s3/provider/Mega.yaml  Normal file
@@ -0,0 +1,15 @@
name: Mega
description: MEGA S4 Object Storage
endpoint:
  s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
  s3.eu-central-2.s4.mega.io: Mega S4 eu-central-2 (Bettembourg)
  s3.ca-central-1.s4.mega.io: Mega S4 ca-central-1 (Montreal)
  s3.ca-west-1.s4.mega.io: Mega S4 ca-west-1 (Vancouver)
bucket_acl: true
quirks:
  list_version: 2
  force_path_style: true
  list_url_encode: true
  use_multipart_etag: false
  use_already_exists: false
  copy_cutoff: 9223372036854775807
18  backend/s3/provider/Minio.yaml  Normal file
@@ -0,0 +1,18 @@
name: Minio
description: Minio Object Storage
region: {}
endpoint: {}
location_constraint: {}
acl: {}
server_side_encryption:
  '': None
  AES256: AES256
  aws:kms: aws:kms
bucket_acl: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
quirks:
  force_path_style: true
12  backend/s3/provider/Netease.yaml  Normal file
@@ -0,0 +1,12 @@
name: Netease
description: Netease Object Storage (NOS)
region: {}
endpoint: {}
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  list_version: 1
  list_url_encode: false
  use_multipart_etag: false
  use_already_exists: false
36  backend/s3/provider/OVHcloud.yaml  Normal file
@@ -0,0 +1,36 @@
name: OVHcloud
description: OVHcloud Object Storage
region:
  gra: Gravelines, France
  rbx: Roubaix, France
  sbg: Strasbourg, France
  eu-west-par: Paris, France (3AZ)
  de: Frankfurt, Germany
  uk: London, United Kingdom
  waw: Warsaw, Poland
  bhs: Beauharnois, Canada
  ca-east-tor: Toronto, Canada
  sgp: Singapore
  ap-southeast-syd: Sydney, Australia
  ap-south-mum: Mumbai, India
  us-east-va: Vint Hill, Virginia, USA
  us-west-or: Hillsboro, Oregon, USA
  rbx-archive: Roubaix, France (Cold Archive)
endpoint:
  s3.gra.io.cloud.ovh.net: OVHcloud Gravelines, France
  s3.rbx.io.cloud.ovh.net: OVHcloud Roubaix, France
  s3.sbg.io.cloud.ovh.net: OVHcloud Strasbourg, France
  s3.eu-west-par.io.cloud.ovh.net: OVHcloud Paris, France (3AZ)
  s3.de.io.cloud.ovh.net: OVHcloud Frankfurt, Germany
  s3.uk.io.cloud.ovh.net: OVHcloud London, United Kingdom
  s3.waw.io.cloud.ovh.net: OVHcloud Warsaw, Poland
  s3.bhs.io.cloud.ovh.net: OVHcloud Beauharnois, Canada
  s3.ca-east-tor.io.cloud.ovh.net: OVHcloud Toronto, Canada
  s3.sgp.io.cloud.ovh.net: OVHcloud Singapore
  s3.ap-southeast-syd.io.cloud.ovh.net: OVHcloud Sydney, Australia
  s3.ap-south-mum.io.cloud.ovh.net: OVHcloud Mumbai, India
  s3.us-east-va.io.cloud.ovh.us: OVHcloud Vint Hill, Virginia, USA
  s3.us-west-or.io.cloud.ovh.us: OVHcloud Hillsboro, Oregon, USA
  s3.rbx-archive.io.cloud.ovh.net: OVHcloud Roubaix, France (Cold Archive)
acl: {}
bucket_acl: true
39  backend/s3/provider/Other.yaml  Normal file
@@ -0,0 +1,39 @@
name: Other
description: Any other S3 compatible provider
region:
  '': |-
    Use this if unsure.
    Will use v4 signatures and an empty region.
  other-v2-signature: |-
    Use this only if v4 signatures don't work.
    E.g. pre Jewel/v10 CEPH.
endpoint: {}
location_constraint: {}
acl:
  private: |-
    Owner gets FULL_CONTROL.
    No one else has access rights (default).
  public-read: |-
    Owner gets FULL_CONTROL.
    The AllUsers group gets READ access.
  public-read-write: |-
    Owner gets FULL_CONTROL.
    The AllUsers group gets READ and WRITE access.
    Granting this on a bucket is generally not recommended.
  authenticated-read: |-
    Owner gets FULL_CONTROL.
    The AuthenticatedUsers group gets READ access.
  bucket-owner-read: |-
    Object owner gets FULL_CONTROL.
    Bucket owner gets READ access.
    If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
  bucket-owner-full-control: |-
    Both the object owner and the bucket owner get FULL_CONTROL over the object.
    If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
bucket_acl: true
quirks:
  list_version: 1
  force_path_style: true
  list_url_encode: false
  use_multipart_etag: false
  use_already_exists: false
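Note: Other.yaml doubles as the source of defaults. In providers.go (later in this diff), any map option a provider declares as {} — present but empty — is backfilled from the corresponding map here. The relevant check, quoted from the addExample closure below:

	if examples.Len() == 0 {
		examples = defaultExamples // defaults := providerMap["Other"]
	}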
18  backend/s3/provider/Outscale.yaml  Normal file
@@ -0,0 +1,18 @@
name: Outscale
description: OUTSCALE Object Storage (OOS)
region:
  eu-west-2: Paris, France
  us-east-2: New Jersey, USA
  us-west-1: California, USA
  cloudgouv-eu-west-1: SecNumCloud, Paris, France
  ap-northeast-1: Tokyo, Japan
endpoint:
  oos.eu-west-2.outscale.com: Outscale EU West 2 (Paris)
  oos.us-east-2.outscale.com: Outscale US East 2 (New Jersey)
  oos.us-west-1.outscale.com: Outscale US West 1 (California)
  oos.cloudgouv-eu-west-1.outscale.com: Outscale SecNumCloud (Paris)
  oos.ap-northeast-1.outscale.com: Outscale AP Northeast 1 (Japan)
acl: {}
bucket_acl: true
quirks:
  force_path_style: true
19  backend/s3/provider/Petabox.yaml  Normal file
@@ -0,0 +1,19 @@
name: Petabox
description: Petabox Object Storage
region:
  us-east-1: US East (N. Virginia)
  eu-central-1: Europe (Frankfurt)
  ap-southeast-1: Asia Pacific (Singapore)
  me-south-1: Middle East (Bahrain)
  sa-east-1: South America (São Paulo)
endpoint:
  s3.petabox.io: US East (N. Virginia)
  s3.us-east-1.petabox.io: US East (N. Virginia)
  s3.eu-central-1.petabox.io: Europe (Frankfurt)
  s3.ap-southeast-1.petabox.io: Asia Pacific (Singapore)
  s3.me-south-1.petabox.io: Middle East (Bahrain)
  s3.sa-east-1.petabox.io: South America (São Paulo)
acl: {}
bucket_acl: true
quirks:
  use_already_exists: false
53  backend/s3/provider/Qiniu.yaml  Normal file
@@ -0,0 +1,53 @@
name: Qiniu
description: Qiniu Object Storage (Kodo)
region:
  cn-east-1: |-
    The default endpoint - a good choice if you are unsure.
    East China Region 1.
    Needs location constraint cn-east-1.
  cn-east-2: |-
    East China Region 2.
    Needs location constraint cn-east-2.
  cn-north-1: |-
    North China Region 1.
    Needs location constraint cn-north-1.
  cn-south-1: |-
    South China Region 1.
    Needs location constraint cn-south-1.
  us-north-1: |-
    North America Region.
    Needs location constraint us-north-1.
  ap-southeast-1: |-
    Southeast Asia Region 1.
    Needs location constraint ap-southeast-1.
  ap-northeast-1: |-
    Northeast Asia Region 1.
    Needs location constraint ap-northeast-1.
endpoint:
  s3-cn-east-1.qiniucs.com: East China Endpoint 1
  s3-cn-east-2.qiniucs.com: East China Endpoint 2
  s3-cn-north-1.qiniucs.com: North China Endpoint 1
  s3-cn-south-1.qiniucs.com: South China Endpoint 1
  s3-us-north-1.qiniucs.com: North America Endpoint 1
  s3-ap-southeast-1.qiniucs.com: Southeast Asia Endpoint 1
  s3-ap-northeast-1.qiniucs.com: Northeast Asia Endpoint 1
location_constraint:
  cn-east-1: East China Region 1
  cn-east-2: East China Region 2
  cn-north-1: North China Region 1
  cn-south-1: South China Region 1
  us-north-1: North America Region 1
  ap-southeast-1: Southeast Asia Region 1
  ap-northeast-1: Northeast Asia Region 1
acl: {}
storage_class:
  STANDARD: Standard storage class
  LINE: Infrequent access storage mode
  GLACIER: Archive storage mode
  DEEP_ARCHIVE: Deep archive storage mode
bucket_acl: true
quirks:
  use_multipart_etag: false
  list_url_encode: false
  force_path_style: true
  use_already_exists: false
15  backend/s3/provider/Rabata.yaml  Normal file
@@ -0,0 +1,15 @@
name: Rabata
description: Rabata Cloud Storage
region:
  us-east-1: US East (N. Virginia)
  eu-west-1: EU (Ireland)
  eu-west-2: EU (London)
endpoint:
  s3.us-east-1.rabata.io: US East (N. Virginia)
  s3.eu-west-1.rabata.io: EU West (Ireland)
  s3.eu-west-2.rabata.io: EU West (London)
location_constraint:
  us-east-1: US East (N. Virginia)
  eu-west-1: EU (Ireland)
  eu-west-2: EU (London)
# server side copy not supported
67  backend/s3/provider/RackCorp.yaml  Normal file
@@ -0,0 +1,67 @@
name: RackCorp
description: RackCorp Object Storage
region:
  global: Global CDN (All locations) Region
  au: Australia (All states)
  au-nsw: NSW (Australia) Region
  au-qld: QLD (Australia) Region
  au-vic: VIC (Australia) Region
  au-wa: Perth (Australia) Region
  ph: Manila (Philippines) Region
  th: Bangkok (Thailand) Region
  hk: HK (Hong Kong) Region
  mn: Ulaanbaatar (Mongolia) Region
  kg: Bishkek (Kyrgyzstan) Region
  id: Jakarta (Indonesia) Region
  jp: Tokyo (Japan) Region
  sg: SG (Singapore) Region
  de: Frankfurt (Germany) Region
  us: USA (AnyCast) Region
  us-east-1: New York (USA) Region
  us-west-1: Fremont (USA) Region
  nz: Auckland (New Zealand) Region
endpoint:
  s3.rackcorp.com: Global (AnyCast) Endpoint
  au.s3.rackcorp.com: Australia (Anycast) Endpoint
  au-nsw.s3.rackcorp.com: Sydney (Australia) Endpoint
  au-qld.s3.rackcorp.com: Brisbane (Australia) Endpoint
  au-vic.s3.rackcorp.com: Melbourne (Australia) Endpoint
  au-wa.s3.rackcorp.com: Perth (Australia) Endpoint
  ph.s3.rackcorp.com: Manila (Philippines) Endpoint
  th.s3.rackcorp.com: Bangkok (Thailand) Endpoint
  hk.s3.rackcorp.com: HK (Hong Kong) Endpoint
  mn.s3.rackcorp.com: Ulaanbaatar (Mongolia) Endpoint
  kg.s3.rackcorp.com: Bishkek (Kyrgyzstan) Endpoint
  id.s3.rackcorp.com: Jakarta (Indonesia) Endpoint
  jp.s3.rackcorp.com: Tokyo (Japan) Endpoint
  sg.s3.rackcorp.com: SG (Singapore) Endpoint
  de.s3.rackcorp.com: Frankfurt (Germany) Endpoint
  us.s3.rackcorp.com: USA (AnyCast) Endpoint
  us-east-1.s3.rackcorp.com: New York (USA) Endpoint
  us-west-1.s3.rackcorp.com: Fremont (USA) Endpoint
  nz.s3.rackcorp.com: Auckland (New Zealand) Endpoint
location_constraint:
  global: Global CDN Region
  au: Australia (All locations)
  au-nsw: NSW (Australia) Region
  au-qld: QLD (Australia) Region
  au-vic: VIC (Australia) Region
  au-wa: Perth (Australia) Region
  ph: Manila (Philippines) Region
  th: Bangkok (Thailand) Region
  hk: HK (Hong Kong) Region
  mn: Ulaanbaatar (Mongolia) Region
  kg: Bishkek (Kyrgyzstan) Region
  id: Jakarta (Indonesia) Region
  jp: Tokyo (Japan) Region
  sg: SG (Singapore) Region
  de: Frankfurt (Germany) Region
  us: USA (AnyCast) Region
  us-east-1: New York (USA) Region
  us-west-1: Fremont (USA) Region
  nz: Auckland (New Zealand) Region
acl: {}
bucket_acl: true
quirks:
  use_multipart_etag: false
  use_already_exists: false
11  backend/s3/provider/Rclone.yaml  Normal file
@@ -0,0 +1,11 @@
name: Rclone
description: Rclone S3 Server
endpoint: {}
quirks:
  force_path_style: true
  use_multipart_etag: false
  use_already_exists: false
  # rclone serve doesn't support multi-part server side copy:
  # See: https://github.com/rclone/rclone/issues/7454
  # So make cutoff very large which it does support
  copy_cutoff: 9223372036854775807
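Note: the copy_cutoff value 9223372036854775807 is not arbitrary — it is math.MaxInt64 (2^63-1), i.e. "never fall back to multipart server-side copy". Mega (above) and Storj (below) use the same trick. A minimal Go check of the identity:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		// Setting copy_cutoff to the largest int64 effectively disables
		// multipart server-side copy for providers that don't support it.
		fmt.Println(int64(math.MaxInt64) == 9223372036854775807) // true
	}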
28  backend/s3/provider/Scaleway.yaml  Normal file
@@ -0,0 +1,28 @@
name: Scaleway
description: Scaleway Object Storage
region:
  nl-ams: Amsterdam, The Netherlands
  fr-par: Paris, France
  pl-waw: Warsaw, Poland
endpoint:
  s3.nl-ams.scw.cloud: Amsterdam Endpoint
  s3.fr-par.scw.cloud: Paris Endpoint
  s3.pl-waw.scw.cloud: Warsaw Endpoint
acl: {}
storage_class:
  '': Default.
  STANDARD: |-
    The Standard class for any upload.
    Suitable for on-demand content like streaming or CDN.
    Available in all regions.
  GLACIER: |-
    Archived storage.
    Prices are lower, but it needs to be restored first to be accessed.
    Available in FR-PAR and NL-AMS regions.
  ONEZONE_IA: |-
    One Zone - Infrequent Access.
    A good choice for storing secondary backup copies or easily re-creatable data.
    Available in the FR-PAR region only.
bucket_acl: true
quirks:
  max_upload_parts: 1000
14  backend/s3/provider/SeaweedFS.yaml  Normal file
@@ -0,0 +1,14 @@
name: SeaweedFS
description: SeaweedFS S3
region: {}
endpoint:
  localhost:8333: SeaweedFS S3 localhost
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  list_version: 1
  force_path_style: true
  list_url_encode: false
  use_multipart_etag: false
  use_already_exists: false
8  backend/s3/provider/Selectel.yaml  Normal file
@@ -0,0 +1,8 @@
name: Selectel
description: Selectel Object Storage
region:
  ru-1: St. Petersburg
endpoint:
  s3.ru-1.storage.selcloud.ru: Saint Petersburg
quirks:
  list_url_encode: false
17  backend/s3/provider/Servercore.yaml  Normal file
@@ -0,0 +1,17 @@
name: Servercore
description: Servercore Object Storage
region:
  ru-1: St. Petersburg
  gis-1: Moscow
  ru-7: Moscow
  uz-2: Tashkent, Uzbekistan
  kz-1: Almaty, Kazakhstan
endpoint:
  s3.ru-1.storage.selcloud.ru: Saint Petersburg
  s3.gis-1.storage.selcloud.ru: Moscow
  s3.ru-7.storage.selcloud.ru: Moscow
  s3.uz-2.srvstorage.uz: Tashkent, Uzbekistan
  s3.kz-1.srvstorage.kz: Almaty, Kazakhstan
bucket_acl: true
quirks:
  list_url_encode: false
5  backend/s3/provider/SpectraLogic.yaml  Normal file
@@ -0,0 +1,5 @@
name: SpectraLogic
description: Spectra Logic Black Pearl
endpoint: {}
quirks:
  force_path_style: true # path-style required
14  backend/s3/provider/StackPath.yaml  Normal file
@@ -0,0 +1,14 @@
name: StackPath
description: StackPath Object Storage
region: {}
endpoint:
  s3.us-east-2.stackpathstorage.com: US East Endpoint
  s3.us-west-1.stackpathstorage.com: US West Endpoint
  s3.eu-central-1.stackpathstorage.com: EU Endpoint
acl: {}
bucket_acl: true
quirks:
  list_version: 1
  force_path_style: true
  list_url_encode: false
  use_already_exists: false
11  backend/s3/provider/Storj.yaml  Normal file
@@ -0,0 +1,11 @@
name: Storj
description: Storj (S3 Compatible Gateway)
endpoint:
  gateway.storjshare.io: Global Hosted Gateway
quirks:
  use_already_exists: false # returns BucketAlreadyExists
  # Storj doesn't support multi-part server side copy:
  # https://github.com/storj/roadmap/issues/40
  # So make cutoff very large which it does support
  copy_cutoff: 9223372036854775807
  min_chunk_size: 67108864
18  backend/s3/provider/Synology.yaml  Normal file
@@ -0,0 +1,18 @@
name: Synology
description: Synology C2 Object Storage
region:
  eu-001: Europe Region 1
  eu-002: Europe Region 2
  us-001: US Region 1
  us-002: US Region 2
  tw-001: Asia (Taiwan)
endpoint:
  eu-001.s3.synologyc2.net: EU Endpoint 1
  eu-002.s3.synologyc2.net: EU Endpoint 2
  us-001.s3.synologyc2.net: US Endpoint 1
  us-002.s3.synologyc2.net: US Endpoint 2
  tw-001.s3.synologyc2.net: TW Endpoint 1
location_constraint: {}
quirks:
  use_multipart_etag: false
  use_already_exists: false
52  backend/s3/provider/TencentCOS.yaml  Normal file
@@ -0,0 +1,52 @@
name: TencentCOS
description: Tencent Cloud Object Storage (COS)
endpoint:
  cos.ap-beijing.myqcloud.com: Beijing Region
  cos.ap-nanjing.myqcloud.com: Nanjing Region
  cos.ap-shanghai.myqcloud.com: Shanghai Region
  cos.ap-guangzhou.myqcloud.com: Guangzhou Region
  cos.ap-chengdu.myqcloud.com: Chengdu Region
  cos.ap-chongqing.myqcloud.com: Chongqing Region
  cos.ap-hongkong.myqcloud.com: Hong Kong (China) Region
  cos.ap-singapore.myqcloud.com: Singapore Region
  cos.ap-mumbai.myqcloud.com: Mumbai Region
  cos.ap-seoul.myqcloud.com: Seoul Region
  cos.ap-bangkok.myqcloud.com: Bangkok Region
  cos.ap-tokyo.myqcloud.com: Tokyo Region
  cos.na-siliconvalley.myqcloud.com: Silicon Valley Region
  cos.na-ashburn.myqcloud.com: Virginia Region
  cos.na-toronto.myqcloud.com: Toronto Region
  cos.eu-frankfurt.myqcloud.com: Frankfurt Region
  cos.eu-moscow.myqcloud.com: Moscow Region
  cos.accelerate.myqcloud.com: Use Tencent COS Accelerate Endpoint
acl:
  default: |-
    Owner gets FULL_CONTROL.
    No one else has access rights (default).
  public-read: |-
    Owner gets FULL_CONTROL.
    The AllUsers group gets READ access.
  public-read-write: |-
    Owner gets FULL_CONTROL.
    The AllUsers group gets READ and WRITE access.
    Granting this on a bucket is generally not recommended.
  authenticated-read: |-
    Owner gets FULL_CONTROL.
    The AuthenticatedUsers group gets READ access.
  bucket-owner-read: |-
    Object owner gets FULL_CONTROL.
    Bucket owner gets READ access.
    If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
  bucket-owner-full-control: |-
    Both the object owner and the bucket owner get FULL_CONTROL over the object.
    If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
storage_class:
  '': Default
  STANDARD: Standard storage class
  ARCHIVE: Archive storage mode
  STANDARD_IA: Infrequent access storage mode
bucket_acl: true
quirks:
  list_version: 1
  use_multipart_etag: false
  use_already_exists: false
21  backend/s3/provider/Wasabi.yaml  Normal file
@@ -0,0 +1,21 @@
name: Wasabi
description: Wasabi Object Storage
region: {}
endpoint:
  s3.wasabisys.com: Wasabi US East 1 (N. Virginia)
  s3.us-east-2.wasabisys.com: Wasabi US East 2 (N. Virginia)
  s3.us-central-1.wasabisys.com: Wasabi US Central 1 (Texas)
  s3.us-west-1.wasabisys.com: Wasabi US West 1 (Oregon)
  s3.ca-central-1.wasabisys.com: Wasabi CA Central 1 (Toronto)
  s3.eu-central-1.wasabisys.com: Wasabi EU Central 1 (Amsterdam)
  s3.eu-central-2.wasabisys.com: Wasabi EU Central 2 (Frankfurt)
  s3.eu-west-1.wasabisys.com: Wasabi EU West 1 (London)
  s3.eu-west-2.wasabisys.com: Wasabi EU West 2 (Paris)
  s3.eu-south-1.wasabisys.com: Wasabi EU South 1 (Milan)
  s3.ap-northeast-1.wasabisys.com: Wasabi AP Northeast 1 (Tokyo) endpoint
  s3.ap-northeast-2.wasabisys.com: Wasabi AP Northeast 2 (Osaka) endpoint
  s3.ap-southeast-1.wasabisys.com: Wasabi AP Southeast 1 (Singapore)
  s3.ap-southeast-2.wasabisys.com: Wasabi AP Southeast 2 (Sydney)
location_constraint: {}
acl: {}
bucket_acl: true
14  backend/s3/provider/Zata.yaml  Normal file
@@ -0,0 +1,14 @@
name: Zata
description: Zata (S3 compatible Gateway)
region:
  us-east-1: Indore, Madhya Pradesh, India
endpoint:
  idr01.zata.ai: South Asia Endpoint
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
  use_multipart_etag: false
  might_gzip: false
  use_unsigned_payload: false
  use_already_exists: false
237  backend/s3/providers.go  Normal file
@@ -0,0 +1,237 @@
package s3

import (
	"embed"
	stdfs "io/fs"
	"os"
	"sort"
	"strings"

	"github.com/rclone/rclone/fs"
	orderedmap "github.com/wk8/go-ordered-map/v2"
	"gopkg.in/yaml.v3"
)

// YamlMap is converted to YAML in the correct order
type YamlMap = *orderedmap.OrderedMap[string, string]

// NewYamlMap creates a new ordered map
var NewYamlMap = orderedmap.New[string, string]
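// Why an ordered map rather than a plain Go map: map iteration order is
// randomised, but the generated docs should list regions/endpoints exactly
// as written in the provider YAML. Illustrative sketch only (assumes fmt
// is imported; only the orderedmap API from the import block is used):
//
//	m := NewYamlMap()
//	m.Set("us-east-1", "US East (N. Virginia)")
//	m.Set("eu-west-1", "EU (Ireland)")
//	for pair := m.Oldest(); pair != nil; pair = pair.Next() {
//		fmt.Println(pair.Key, pair.Value) // always insertion order
//	}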
// Quirks defines all the S3 provider quirks
type Quirks struct {
	ListVersion                 *int   `yaml:"list_version,omitempty"`     // 1 or 2
	ForcePathStyle              *bool  `yaml:"force_path_style,omitempty"` // true = path-style
	ListURLEncode               *bool  `yaml:"list_url_encode,omitempty"`
	UseMultipartEtag            *bool  `yaml:"use_multipart_etag,omitempty"`
	UseAlreadyExists            *bool  `yaml:"use_already_exists,omitempty"`
	UseAcceptEncodingGzip       *bool  `yaml:"use_accept_encoding_gzip,omitempty"`
	UseDataIntegrityProtections *bool  `yaml:"use_data_integrity_protections,omitempty"`
	MightGzip                   *bool  `yaml:"might_gzip,omitempty"`
	UseMultipartUploads         *bool  `yaml:"use_multipart_uploads,omitempty"`
	UseUnsignedPayload          *bool  `yaml:"use_unsigned_payload,omitempty"`
	UseXID                      *bool  `yaml:"use_x_id,omitempty"`
	SignAcceptEncoding          *bool  `yaml:"sign_accept_encoding,omitempty"`
	CopyCutoff                  *int64 `yaml:"copy_cutoff,omitempty"`
	MaxUploadParts              *int   `yaml:"max_upload_parts,omitempty"`
	MinChunkSize                *int64 `yaml:"min_chunk_size,omitempty"`
}
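// Note on the pointer fields above: a nil pointer means the YAML omitted
// the quirk entirely (fall back to the default), which is distinct from an
// explicit false/0 in the file. A hypothetical helper showing the pattern
// (not part of this change):
//
//	func applyBool(quirk *bool, def bool) bool {
//		if quirk == nil {
//			return def // quirk not set in the YAML
//		}
//		return *quirk // explicit value wins, even false
//	}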
// Provider defines the configurable data in each provider.yaml
type Provider struct {
	Name                 string  `yaml:"name,omitempty"`
	Description          string  `yaml:"description,omitempty"`
	Region               YamlMap `yaml:"region,omitempty"`
	Endpoint             YamlMap `yaml:"endpoint,omitempty"`
	LocationConstraint   YamlMap `yaml:"location_constraint,omitempty"`
	ACL                  YamlMap `yaml:"acl,omitempty"`
	StorageClass         YamlMap `yaml:"storage_class,omitempty"`
	ServerSideEncryption YamlMap `yaml:"server_side_encryption,omitempty"`

	// other
	IBMApiKey             bool `yaml:"ibm_api_key,omitempty"`
	IBMResourceInstanceID bool `yaml:"ibm_resource_instance_id,omitempty"`

	// advanced
	BucketACL             bool `yaml:"bucket_acl,omitempty"`
	DirectoryBucket       bool `yaml:"directory_bucket,omitempty"`
	LeavePartsOnError     bool `yaml:"leave_parts_on_error,omitempty"`
	RequesterPays         bool `yaml:"requester_pays,omitempty"`
	SSECustomerAlgorithm  bool `yaml:"sse_customer_algorithm,omitempty"`
	SSECustomerKey        bool `yaml:"sse_customer_key,omitempty"`
	SSECustomerKeyBase64  bool `yaml:"sse_customer_key_base64,omitempty"`
	SSECustomerKeyMd5     bool `yaml:"sse_customer_key_md5,omitempty"`
	SSEKmsKeyID           bool `yaml:"sse_kms_key_id,omitempty"`
	STSEndpoint           bool `yaml:"sts_endpoint,omitempty"`
	UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`

	Quirks Quirks `yaml:"quirks,omitempty"`
}

//go:embed provider/*.yaml
var providerFS embed.FS
// addProvidersToInfo adds provider information to the fs.RegInfo
func addProvidersToInfo(info *fs.RegInfo) *fs.RegInfo {
	providerMap := loadProviders()
	providerList := constructProviders(info.Options, providerMap)
	info.Description += strings.TrimSuffix(providerList, ", ")
	return info
}
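// Hedged sketch of the intended call site — the real registration lives in
// backend/s3/s3.go, whose diff is suppressed below. fs.Register and
// fs.RegInfo are existing rclone APIs, but the literal shown here is
// illustrative only, not the actual wiring from this change:
//
//	func init() {
//		fs.Register(addProvidersToInfo(&fs.RegInfo{
//			Name:        "s3",
//			Description: "Amazon S3 Compliant Storage Providers including ",
//			// Options, NewFs, etc. as defined in s3.go
//		}))
//	}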
// loadProvider loads a single provider
//
// It returns nil if it could not be found except if "Other" which is a fatal error.
func loadProvider(name string) *Provider {
	data, err := stdfs.ReadFile(providerFS, "provider/"+name+".yaml")
	if err != nil {
		if os.IsNotExist(err) && name != "Other" {
			return nil
		}
		fs.Fatalf(nil, "internal error: failed to load provider %q: %v", name, err)
	}
	var p Provider
	err = yaml.Unmarshal(data, &p)
	if err != nil {
		fs.Fatalf(nil, "internal error: failed to unmarshal provider %q: %v", name, err)
	}
	return &p
}
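// Usage sketch (illustrative only, assumes fmt is imported):
//
//	if p := loadProvider("Wasabi"); p != nil {
//		fmt.Println(p.Name, "-", p.Description) // Wasabi - Wasabi Object Storage
//	}
//
// Unknown names return nil; "Other" is special-cased as fatal because the
// code below relies on it as the source of defaults.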
// loadProviders loads provider definitions from embedded YAML files
func loadProviders() map[string]*Provider {
	providers, err := stdfs.ReadDir(providerFS, "provider")
	if err != nil {
		fs.Fatalf(nil, "internal error: failed to read embedded providers: %v", err)
	}
	providerMap := make(map[string]*Provider, len(providers))

	for _, provider := range providers {
		name, _ := strings.CutSuffix(provider.Name(), ".yaml")
		p := loadProvider(name)
		providerMap[p.Name] = p
	}
	return providerMap
}
// constructProviders populates fs.Options with provider-specific examples and information
func constructProviders(options fs.Options, providerMap map[string]*Provider) string {
	// Defaults for map options set to {}
	defaults := providerMap["Other"]

	// sort providers: AWS first, Other last, rest alphabetically
	providers := make([]*Provider, 0, len(providerMap))
	for _, p := range providerMap {
		providers = append(providers, p)
	}
	sort.Slice(providers, func(i, j int) bool {
		if providers[i].Name == "AWS" {
			return true
		}
		if providers[j].Name == "AWS" {
			return false
		}
		if providers[i].Name == "Other" {
			return false
		}
		if providers[j].Name == "Other" {
			return true
		}
		return strings.ToLower(providers[i].Name) < strings.ToLower(providers[j].Name)
	})
	addProvider := func(sp *string, name string) {
		if *sp != "" {
			*sp += ","
		}
		*sp += name
	}

	addBool := func(opt *fs.Option, p *Provider, flag bool) {
		if flag {
			addProvider(&opt.Provider, p.Name)
		}
	}

	addExample := func(opt *fs.Option, p *Provider, examples, defaultExamples YamlMap) {
		if examples == nil {
			return
		}
		if examples.Len() == 0 {
			examples = defaultExamples
		}
		addProvider(&opt.Provider, p.Name)
	OUTER:
		for pair := examples.Oldest(); pair != nil; pair = pair.Next() {
			// Find an existing example to add to if possible
			for i, example := range opt.Examples {
				if example.Value == pair.Key && example.Help == pair.Value {
					addProvider(&opt.Examples[i].Provider, p.Name)
					continue OUTER
				}
			}
			// Otherwise add a new one
			opt.Examples = append(opt.Examples, fs.OptionExample{
				Value:    pair.Key,
				Help:     pair.Value,
				Provider: p.Name,
			})
		}
	}
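	// Illustration of the dedup in addExample above: Selectel and Servercore
	// both declare `s3.ru-1.storage.selcloud.ru: Saint Petersburg` in their
	// YAML, so once both providers have been processed the shared entry reads:
	//
	//	fs.OptionExample{
	//		Value:    "s3.ru-1.storage.selcloud.ru",
	//		Help:     "Saint Petersburg",
	//		Provider: "Selectel,Servercore",
	//	}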
	var providerList strings.Builder

	for _, p := range providers {
		for i := range options {
			opt := &options[i]
			switch opt.Name {
			case "provider":
				opt.Examples = append(opt.Examples, fs.OptionExample{
					Value: p.Name,
					Help:  p.Description,
				})
				providerList.WriteString(p.Name + ", ")
			case "region":
				addExample(opt, p, p.Region, defaults.Region)
			case "endpoint":
				addExample(opt, p, p.Endpoint, defaults.Endpoint)
			case "location_constraint":
				addExample(opt, p, p.LocationConstraint, defaults.LocationConstraint)
			case "acl":
				addExample(opt, p, p.ACL, defaults.ACL)
			case "storage_class":
				addExample(opt, p, p.StorageClass, defaults.StorageClass)
			case "server_side_encryption":
				addExample(opt, p, p.ServerSideEncryption, defaults.ServerSideEncryption)
			case "bucket_acl":
				addBool(opt, p, p.BucketACL)
			case "requester_pays":
				addBool(opt, p, p.RequesterPays)
			case "sse_customer_algorithm":
				addBool(opt, p, p.SSECustomerAlgorithm)
			case "sse_kms_key_id":
				addBool(opt, p, p.SSEKmsKeyID)
			case "sse_customer_key":
				addBool(opt, p, p.SSECustomerKey)
			case "sse_customer_key_base64":
				addBool(opt, p, p.SSECustomerKeyBase64)
			case "sse_customer_key_md5":
				addBool(opt, p, p.SSECustomerKeyMd5)
			case "directory_bucket":
				addBool(opt, p, p.DirectoryBucket)
			case "ibm_api_key":
				addBool(opt, p, p.IBMApiKey)
			case "ibm_resource_instance_id":
				addBool(opt, p, p.IBMResourceInstanceID)
			case "leave_parts_on_error":
				addBool(opt, p, p.LeavePartsOnError)
			case "sts_endpoint":
				addBool(opt, p, p.STSEndpoint)
			case "use_accelerate_endpoint":
				addBool(opt, p, p.UseAccelerateEndpoint)
			}
		}
	}

	return strings.TrimSuffix(providerList.String(), ", ")
}
3091  backend/s3/s3.go
File diff suppressed because it is too large
@@ -62,14 +62,14 @@ func TestAWSDualStackOption(t *testing.T) {
 		// test enabled
 		ctx, opt, client := SetupS3Test(t)
 		opt.UseDualStack = true
-		s3Conn, err := s3Connection(ctx, opt, client)
+		s3Conn, _, err := s3Connection(ctx, opt, client)
 		require.NoError(t, err)
 		assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
 	}
 	{
 		// test default case
 		ctx, opt, client := SetupS3Test(t)
-		s3Conn, err := s3Connection(ctx, opt, client)
+		s3Conn, _, err := s3Connection(ctx, opt, client)
 		require.NoError(t, err)
 		assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
 	}
Some files were not shown because too many files have changed in this diff.