mirror of <https://github.com/rclone/rclone.git>, synced 2025-12-06 00:03:32 +00:00

Compare commits: fix-8980-d ... copilot/fi (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 46bc876085 | |
| | a58efc1544 | |
| | 4f5efe2871 | |
| | 6d9f4a3c20 | |
`.github/workflows/build.yml` (vendored, 14 lines changed)
````diff
@@ -239,13 +239,13 @@ jobs:
           restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-

       - name: Code quality test (Linux)
-        uses: golangci/golangci-lint-action@v9
+        uses: golangci/golangci-lint-action@v8
         with:
           version: latest
           skip-cache: true

       - name: Code quality test (Windows)
-        uses: golangci/golangci-lint-action@v9
+        uses: golangci/golangci-lint-action@v8
         env:
           GOOS: "windows"
         with:
@@ -253,7 +253,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (macOS)
-        uses: golangci/golangci-lint-action@v9
+        uses: golangci/golangci-lint-action@v8
         env:
           GOOS: "darwin"
         with:
@@ -261,7 +261,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (FreeBSD)
-        uses: golangci/golangci-lint-action@v9
+        uses: golangci/golangci-lint-action@v8
         env:
           GOOS: "freebsd"
         with:
@@ -269,7 +269,7 @@ jobs:
           skip-cache: true

       - name: Code quality test (OpenBSD)
-        uses: golangci/golangci-lint-action@v9
+        uses: golangci/golangci-lint-action@v8
         env:
           GOOS: "openbsd"
         with:
@@ -291,9 +291,7 @@ jobs:
           README.md
           RELEASE.md
           CODE_OF_CONDUCT.md
-          librclone\README.md
-          backend\s3\README.md
-          docs/content/{_index,authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
+          docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md

       - name: Scan edits of autogenerated files
         run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
````
Markdownlint configuration

````diff
@@ -41,32 +41,3 @@ single-title: # MD025
 # Markdown files we must use whatever works in the final HTML generated docs.
 # Suppress Markdownlint warning: Link fragments should be valid.
 link-fragments: false # MD051
-
-# Restrict the languages and language identifiers to use for code blocks.
-# We only want those supported by both Hugo and GitHub. These are documented
-# here:
-# https://gohugo.io/content-management/syntax-highlighting/#languages
-# https://docs.github.com//get-started/writing-on-github/working-with-advanced-formatting/creating-and-highlighting-code-blocks#syntax-highlighting
-# In addition, we only want to allow identifiers (aliases) that correspond to
-# the same language in Hugo and GitHub, and preferrably also VSCode and other
-# commonly used tools, to avoid confusion. An example of this is that "shell"
-# by some are considered an identifier for shell scripts, i.e. an alias for
-# "sh", while others consider it an identifier for shell sessions, i.e. an
-# alias for "console". Although Hugo and GitHub in this case are consistent and
-# have choosen the former, using "sh" instead, and not allowing use of "shell",
-# avoids the confusion entirely.
-fenced-code-language: # MD040
-  allowed_languages:
-    - text
-    - console
-    - sh
-    - bat
-    - ini
-    - json
-    - yaml
-    - go
-    - python
-    - c++
-    - c#
-    - java
-    - powershell
````
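The comments removed above describe why the `shell` identifier is ambiguous between shell scripts and shell sessions. As an illustration of that distinction (not part of the diff itself), the same command rendered under both identifiers:

````text
```sh
echo hello    # a shell script: commands only, no prompt
```

```console
$ echo hello  # a shell session: prompt plus output
hello
```
````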
`CONTRIBUTING.md`

````diff
@@ -38,7 +38,7 @@ and [email](https://docs.github.com/en/github/setting-up-and-managing-your-githu
 Next open your terminal, change directory to your preferred folder and initialise
 your local rclone project:
 
-```console
+```sh
 git clone https://github.com/rclone/rclone.git
 cd rclone
 git remote rename origin upstream
@@ -53,13 +53,13 @@ executed from the rclone folder created above.
 
 Now [install Go](https://golang.org/doc/install) and verify your installation:
 
-```console
+```sh
 go version
 ```
 
 Great, you can now compile and execute your own version of rclone:
 
-```console
+```sh
 go build
 ./rclone version
 ```
@@ -68,7 +68,7 @@ go build
 more accurate version number in the executable as well as enable you to specify
 more build options.) Finally make a branch to add your new feature
 
-```console
+```sh
 git checkout -b my-new-feature
 ```
 
@@ -80,7 +80,7 @@ and a quick view on the rclone [code organisation](#code-organisation).
 When ready - test the affected functionality and run the unit tests for the
 code you changed
 
-```console
+```sh
 cd folder/with/changed/files
 go test -v
 ```
@@ -99,7 +99,7 @@ Make sure you
 
 When you are done with that push your changes to GitHub:
 
-```console
+```sh
 git push -u origin my-new-feature
 ```
 
@@ -119,7 +119,7 @@ or [squash your commits](#squashing-your-commits).
 
 Follow the guideline for [commit messages](#commit-messages) and then:
 
-```console
+```sh
 git checkout my-new-feature # To switch to your branch
 git status # To see the new and changed files
 git add FILENAME # To select FILENAME for the commit
@@ -130,7 +130,7 @@ git log # To verify the commit. Use q to quit the log
 
 You can modify the message or changes in the latest commit using:
 
-```console
+```sh
 git commit --amend
 ```
 
@@ -145,7 +145,7 @@ pushed to GitHub.
 
 Your previously pushed commits are replaced by:
 
-```console
+```sh
 git push --force origin my-new-feature
 ```
 
@@ -154,7 +154,7 @@ git push --force origin my-new-feature
 To base your changes on the latest version of the
 [rclone master](https://github.com/rclone/rclone/tree/master) (upstream):
 
-```console
+```sh
 git checkout master
 git fetch upstream
 git merge --ff-only
@@ -170,7 +170,7 @@ If you rebase commits that have been pushed to GitHub, then you will have to
 
 To combine your commits into one commit:
 
-```console
+```sh
 git log # To count the commits to squash, e.g. the last 2
 git reset --soft HEAD~2 # To undo the 2 latest commits
 git status # To check everything is as expected
@@ -178,13 +178,13 @@ git status # To check everything is as expected
 
 If everything is fine, then make the new combined commit:
 
-```console
+```sh
 git commit # To commit the undone commits as one
 ```
 
 otherwise, you may roll back using:
 
-```console
+```sh
 git reflog # To check that HEAD{1} is your previous state
 git reset --soft 'HEAD@{1}' # To roll back to your previous state
 ```
@@ -219,13 +219,13 @@ to check an error return).
 rclone's tests are run from the go testing framework, so at the top
 level you can run this to run all the tests.
 
-```console
+```sh
 go test -v ./...
 ```
 
 You can also use `make`, if supported by your platform
 
-```console
+```sh
 make quicktest
 ```
 
@@ -246,7 +246,7 @@ need to make a remote called `TestDrive`.
 You can then run the unit tests in the drive directory. These tests
 are skipped if `TestDrive:` isn't defined.
 
-```console
+```sh
 cd backend/drive
 go test -v
 ```
@@ -255,7 +255,7 @@ You can then run the integration tests which test all of rclone's
 operations. Normally these get run against the local file system,
 but they can be run against any of the remotes.
 
-```console
+```sh
 cd fs/sync
 go test -v -remote TestDrive:
 go test -v -remote TestDrive: -fast-list
@@ -268,8 +268,9 @@ If you want to use the integration test framework to run these tests
 altogether with an HTML report and test retries then from the
 project root:
 
-```console
-go run ./fstest/test_all -backends drive
+```sh
+go install github.com/rclone/rclone/fstest/test_all
+test_all -backends drive
 ```
 
 ### Full integration testing
@@ -277,19 +278,19 @@ go run ./fstest/test_all -backends drive
 If you want to run all the integration tests against all the remotes,
 then change into the project root and run
 
-```console
+```sh
 make check
 make test
 ```
 
 The commands may require some extra go packages which you can install with
 
-```console
+```sh
 make build_dep
 ```
 
 The full integration tests are run daily on the integration test server. You can
-find the results at <https://integration.rclone.org>
+find the results at <https://pub.rclone.org/integration-tests/>
 
 ## Code Organisation
 
@@ -348,13 +349,11 @@ If you are adding a new feature then please update the documentation.
 
 The documentation sources are generally in Markdown format, in conformance
 with the CommonMark specification and compatible with GitHub Flavored
-Markdown (GFM). The markdown format and style is checked as part of the lint
-operation that runs automatically on pull requests, to enforce standards and
-consistency. This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
-tool by David Anson, which can also be integrated into editors so you can
-perform the same checks while writing. It generally follows Ciro Santilli's
-[Markdown Style Guide](https://cirosantilli.com/markdown-style-guide), which
-is good source if you want to know more.
+Markdown (GFM). The markdown format is checked as part of the lint operation
+that runs automatically on pull requests, to enforce standards and consistency.
+This is based on the [markdownlint](https://github.com/DavidAnson/markdownlint)
+tool, which can also be integrated into editors so you can perform the same
+checks while writing.
 
 HTML pages, served as website <rclone.org>, are generated from the Markdown,
 using [Hugo](https://gohugo.io). Note that when generating the HTML pages,
@@ -383,7 +382,7 @@ If you add a new general flag (not for a backend), then document it in
 alphabetical order.
 
 If you add a new backend option/flag, then it should be documented in
-the source file in the `Help:` field:
+the source file in the `Help:` field.
 
 - Start with the most important information about the option,
   as a single sentence on a single line.
@@ -405,30 +404,6 @@ the source file in the `Help:` field:
   as an unordered list, therefore a single line break is enough to
   create a new list item. Also, for enumeration texts like name of
   countries, it looks better without an ending period/full stop character.
-- You can run `make backenddocs` to verify the resulting Markdown.
-  - This will update the autogenerated sections of the backend docs Markdown
-    files under `docs/content`.
-  - It requires you to have [Python](https://www.python.org) installed.
-  - The `backenddocs` make target runs the Python script `bin/make_backend_docs.py`,
-    and you can also run this directly, optionally with the name of a backend
-    as argument to only update the docs for a specific backend.
-  - **Do not** commit the updated Markdown files. This operation is run as part of
-    the release process. Since any manual changes in the autogenerated sections
-    of the Markdown files will then be lost, we have a pull request check that
-    reports error for any changes within the autogenerated sections. Should you
-    have done manual changes outside of the autogenerated sections they must be
-    committed, of course.
-- You can run `make serve` to verify the resulting website.
-  - This will build the website and serve it locally, so you can open it in
-    your web browser and verify that the end result looks OK. Check specifically
-    any added links, also in light of the note above regarding different algorithms
-    for generated header anchors.
-  - It requires you to have the [Hugo](https://gohugo.io) tool available.
-  - The `serve` make target depends on the `website` target, which runs the
-    `hugo` command from the `docs` directory to build the website, and then
-    it serves the website locally with an embedded web server using a command
-    `hugo server --logLevel info -w --disableFastRender --ignoreCache`, so you
-    can run similar Hugo commands directly as well.
 
 When writing documentation for an entirely new backend,
 see [backend documentation](#backend-documentation).
@@ -445,11 +420,6 @@ for small changes in the docs which makes it very easy. Just remember the
 caveat when linking to header anchors, noted above, which means that GitHub's
 Markdown preview may not be an entirely reliable verification of the results.
 
-After your changes have been merged, you can verify them on
-[tip.rclone.org](https://tip.rclone.org). This site is updated daily with the
-current state of the master branch at 07:00 UTC. The changes will be on the main
-[rclone.org](https://rclone.org) site once they have been included in a release.
-
 ## Making a release
 
 There are separate instructions for making a release in the RELEASE.md
@@ -508,7 +478,7 @@ To add a dependency `github.com/ncw/new_dependency` see the
 instructions below. These will fetch the dependency and add it to
 `go.mod` and `go.sum`.
 
-```console
+```sh
 go get github.com/ncw/new_dependency
 ```
 
@@ -522,7 +492,7 @@ and `go.sum` in the same commit as your other changes.
 
 If you need to update a dependency then run
 
-```console
+```sh
 go get golang.org/x/crypto
 ```
 
@@ -611,7 +581,8 @@ remote or an fs.
 - Add your backend to `fstest/test_all/config.yaml`
 - Once you've done that then you can use the integration test framework from
   the project root:
-  - `go run ./fstest/test_all -backends remote`
+  - go install ./...
+  - test_all -backends remote
 
 Or if you want to run the integration tests manually:
 
````
`RELEASE.md` (22 lines changed)
````diff
@@ -60,7 +60,7 @@ If `make updatedirect` added a `toolchain` directive then remove it.
 We don't want to force a toolchain on our users. Linux packagers are
 often using a version of Go that is a few versions out of date.
 
-```console
+```sh
 go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
 go get -d $(cat /tmp/potential-upgrades)
 go mod tidy -go=1.22 -compat=1.22
@@ -70,7 +70,7 @@ If the `go mod tidy` fails use the output from it to remove the
 package which can't be upgraded from `/tmp/potential-upgrades` when
 done
 
-```console
+```sh
 git co go.mod go.sum
 ```
 
@@ -102,7 +102,7 @@ The above procedure will not upgrade major versions, so v2 to v3.
 However this tool can show which major versions might need to be
 upgraded:
 
-```console
+```sh
 go run github.com/icholy/gomajor@latest list -major
 ```
 
@@ -112,7 +112,7 @@ Expect API breakage when updating major versions.
 
 At some point after the release run
 
-```console
+```sh
 bin/tidy-beta v1.55
 ```
 
@@ -159,7 +159,7 @@ which is a private repo containing artwork from sponsors.
 
 Create an update website branch based off the last release
 
-```console
+```sh
 git co -b update-website
 ```
 
@@ -167,19 +167,19 @@ If the branch already exists, double check there are no commits that need saving
 
 Now reset the branch to the last release
 
-```console
+```sh
 git reset --hard v1.64.0
 ```
 
 Create the changes, check them in, test with `make serve` then
 
-```console
+```sh
 make upload_test_website
 ```
 
 Check out <https://test.rclone.org> and when happy
 
-```console
+```sh
 make upload_website
 ```
 
@@ -189,14 +189,14 @@ Cherry pick any changes back to master and the stable branch if it is active.
 
 To do a basic build of rclone's docker image to debug builds locally:
 
-```console
+```sh
 docker buildx build --load -t rclone/rclone:testing --progress=plain .
 docker run --rm rclone/rclone:testing version
 ```
 
 To test the multipatform build
 
-```console
+```sh
 docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
 ```
 
@@ -204,6 +204,6 @@ To make a full build then set the tags correctly and add `--push`
 
 Note that you can't only build one architecture - you need to build them all.
 
-```console
+```sh
 docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```
````
`backend/b2/api/types.go`

````diff
@@ -48,14 +48,6 @@ type LifecycleRule struct {
 	FileNamePrefix string `json:"fileNamePrefix"`
 }
 
-// ServerSideEncryption is a configuration object for B2 Server-Side Encryption
-type ServerSideEncryption struct {
-	Mode           string `json:"mode"`
-	Algorithm      string `json:"algorithm"`      // Encryption algorithm to use
-	CustomerKey    string `json:"customerKey"`    // User provided Base64 encoded key that is used by the server to encrypt files
-	CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
-}
-
 // Timestamp is a UTC time when this file was uploaded. It is a base
 // 10 number of milliseconds since midnight, January 1, 1970 UTC. This
 // fits in a 64 bit integer such as the type "long" in the programming
@@ -269,22 +261,21 @@ type GetFileInfoRequest struct {
 //
 // Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
 type StartLargeFileRequest struct {
-	BucketID    string            `json:"bucketId"`    // The ID of the bucket that the file will go in.
+	BucketID    string            `json:"bucketId"`    //The ID of the bucket that the file will go in.
 	Name        string            `json:"fileName"`    // The name of the file. See Files for requirements on file names.
 	ContentType string            `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
 	Info        map[string]string `json:"fileInfo"`    // A JSON object holding the name/value pairs for the custom file info.
-	ServerSideEncryption *ServerSideEncryption `json:"serverSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption
 }
 
 // StartLargeFileResponse is the response to StartLargeFileRequest
 type StartLargeFileResponse struct {
 	ID          string            `json:"fileId"`      // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
 	Name        string            `json:"fileName"`    // The name of this file, which can be used with b2_download_file_by_name.
 	AccountID   string            `json:"accountId"`   // The identifier for the account.
 	BucketID    string            `json:"bucketId"`    // The unique ID of the bucket.
 	ContentType string            `json:"contentType"` // The MIME type of the file.
 	Info        map[string]string `json:"fileInfo"`    // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
-	UploadTimestamp Timestamp `json:"uploadTimestamp,omitempty"` // This is a UTC time when this file was uploaded.
+	UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
 }
 
 // GetUploadPartURLRequest is passed to b2_get_upload_part_url
@@ -334,25 +325,21 @@ type CancelLargeFileResponse struct {
 
 // CopyFileRequest is as passed to b2_copy_file
 type CopyFileRequest struct {
 	SourceID          string            `json:"sourceFileId"`                  // The ID of the source file being copied.
 	Name              string            `json:"fileName"`                      // The name of the new file being created.
 	Range             string            `json:"range,omitempty"`               // The range of bytes to copy. If not provided, the whole source file will be copied.
 	MetadataDirective string            `json:"metadataDirective,omitempty"`   // The strategy for how to populate metadata for the new file: COPY or REPLACE
 	ContentType       string            `json:"contentType,omitempty"`         // The MIME type of the content of the file (REPLACE only)
 	Info              map[string]string `json:"fileInfo,omitempty"`            // This field stores the metadata that will be stored with the file. (REPLACE only)
 	DestBucketID      string            `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
-	SourceServerSideEncryption      *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"`      // A JSON object holding values related to Server-Side Encryption for the source file
-	DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
 }
 
 // CopyPartRequest is the request for b2_copy_part - the response is UploadPartResponse
 type CopyPartRequest struct {
 	SourceID    string `json:"sourceFileId"`    // The ID of the source file being copied.
 	LargeFileID string `json:"largeFileId"`     // The ID of the large file the part will belong to, as returned by b2_start_large_file.
 	PartNumber  int64  `json:"partNumber"`      // Which part this is (starting from 1)
 	Range       string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
-	SourceServerSideEncryption      *ServerSideEncryption `json:"sourceServerSideEncryption,omitempty"`      // A JSON object holding values related to Server-Side Encryption for the source file
-	DestinationServerSideEncryption *ServerSideEncryption `json:"destinationServerSideEncryption,omitempty"` // A JSON object holding values related to Server-Side Encryption for the destination file
 }
 
 // UpdateBucketRequest describes a request to modify a B2 bucket
````
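As a quick illustration of how the removed `ServerSideEncryption` object is serialised on the wire, here is a minimal, self-contained sketch. The struct is copied from the left-hand side of the diff above; the key value is made up for illustration, and the `SSE-C`/`AES256` values match the removed backend code further down.

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// ServerSideEncryption mirrors the struct removed in the hunk above.
type ServerSideEncryption struct {
	Mode           string `json:"mode"`
	Algorithm      string `json:"algorithm"`      // Encryption algorithm to use
	CustomerKey    string `json:"customerKey"`    // Base64 encoded key
	CustomerKeyMd5 string `json:"customerKeyMd5"` // MD5 of the decoded key
}

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // 32 bytes: an example 256-bit key
	sum := md5.Sum(key)
	sse := ServerSideEncryption{
		Mode:           "SSE-C",
		Algorithm:      "AES256",
		CustomerKey:    base64.StdEncoding.EncodeToString(key),
		CustomerKeyMd5: base64.StdEncoding.EncodeToString(sum[:]),
	}
	out, _ := json.MarshalIndent(sse, "", "  ")
	fmt.Println(string(out))
}
```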
`backend/b2/b2.go` (171 lines changed)
````diff
@@ -8,9 +8,7 @@ import (
 	"bufio"
 	"bytes"
 	"context"
-	"crypto/md5"
 	"crypto/sha1"
-	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -55,9 +53,6 @@ const (
 	nameHeader       = "X-Bz-File-Name"
 	timestampHeader  = "X-Bz-Upload-Timestamp"
 	retryAfterHeader = "Retry-After"
-	sseAlgorithmHeader = "X-Bz-Server-Side-Encryption-Customer-Algorithm"
-	sseKeyHeader       = "X-Bz-Server-Side-Encryption-Customer-Key"
-	sseMd5Header       = "X-Bz-Server-Side-Encryption-Customer-Key-Md5"
 	minSleep         = 10 * time.Millisecond
 	maxSleep         = 5 * time.Minute
 	decayConstant    = 1 // bigger for slower decay, exponential
@@ -72,7 +67,7 @@ const (
 
 // Globals
 var (
-	errNotWithVersions  = errors.New("can't modify files in --b2-versions mode")
+	errNotWithVersions  = errors.New("can't modify or delete files in --b2-versions mode")
 	errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
 )
 
@@ -257,51 +252,6 @@ See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket
 			Default: (encoder.Display |
 				encoder.EncodeBackSlash |
 				encoder.EncodeInvalidUtf8),
-		}, {
-			Name:     "sse_customer_algorithm",
-			Help:     "If using SSE-C, the server-side encryption algorithm used when storing this object in B2.",
-			Advanced: true,
-			Examples: []fs.OptionExample{{
-				Value: "",
-				Help:  "None",
-			}, {
-				Value: "AES256",
-				Help:  "Advanced Encryption Standard (256 bits key length)",
-			}},
-		}, {
-			Name: "sse_customer_key",
-			Help: `To use SSE-C, you may provide the secret encryption key encoded in a UTF-8 compatible string to encrypt/decrypt your data
-
-Alternatively you can provide --sse-customer-key-base64.`,
-			Advanced: true,
-			Examples: []fs.OptionExample{{
-				Value: "",
-				Help:  "None",
-			}},
-			Sensitive: true,
-		}, {
-			Name: "sse_customer_key_base64",
-			Help: `To use SSE-C, you may provide the secret encryption key encoded in Base64 format to encrypt/decrypt your data
-
-Alternatively you can provide --sse-customer-key.`,
-			Advanced: true,
-			Examples: []fs.OptionExample{{
-				Value: "",
-				Help:  "None",
-			}},
-			Sensitive: true,
-		}, {
-			Name: "sse_customer_key_md5",
-			Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
-
-If you leave it blank, this is calculated automatically from the sse_customer_key provided.
-`,
-			Advanced: true,
-			Examples: []fs.OptionExample{{
-				Value: "",
-				Help:  "None",
-			}},
-			Sensitive: true,
 		}},
 	})
 }
@@ -324,10 +274,6 @@ type Options struct {
 	DownloadAuthorizationDuration fs.Duration          `config:"download_auth_duration"`
 	Lifecycle                     int                  `config:"lifecycle"`
 	Enc                           encoder.MultiEncoder `config:"encoding"`
-	SSECustomerAlgorithm          string               `config:"sse_customer_algorithm"`
-	SSECustomerKey                string               `config:"sse_customer_key"`
-	SSECustomerKeyBase64          string               `config:"sse_customer_key_base64"`
-	SSECustomerKeyMD5             string               `config:"sse_customer_key_md5"`
 }
 
 // Fs represents a remote b2 server
@@ -558,24 +504,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.Endpoint == "" {
 		opt.Endpoint = defaultEndpoint
 	}
-	if opt.SSECustomerKey != "" && opt.SSECustomerKeyBase64 != "" {
-		return nil, errors.New("b2: can't use both sse_customer_key and sse_customer_key_base64 at the same time")
-	} else if opt.SSECustomerKeyBase64 != "" {
-		// Decode the Base64-encoded key and store it in the SSECustomerKey field
-		decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
-		if err != nil {
-			return nil, fmt.Errorf("b2: Could not decode sse_customer_key_base64: %w", err)
-		}
-		opt.SSECustomerKey = string(decoded)
-	} else {
-		// Encode the raw key as Base64
-		opt.SSECustomerKeyBase64 = base64.StdEncoding.EncodeToString([]byte(opt.SSECustomerKey))
-	}
-	if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
-		// Calculate CustomerKeyMd5 if not supplied
-		md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
-		opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
-	}
 	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
````
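The block removed from `NewFs` normalises `sse_customer_key` and `sse_customer_key_base64` into a matching Base64 key and Base64-encoded MD5. Pulled out as a self-contained sketch with the same behaviour (the helper name is hypothetical, not from the source):

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"errors"
	"fmt"
)

// normalizeSSEKey accepts either a raw key or a Base64 key (not both) and
// returns the Base64 key plus the Base64-encoded MD5 of the raw key bytes,
// mirroring the removed NewFs logic.
func normalizeSSEKey(raw, b64 string) (string, string, error) {
	switch {
	case raw != "" && b64 != "":
		return "", "", errors.New("can't use both a raw key and a Base64 key")
	case b64 != "":
		decoded, err := base64.StdEncoding.DecodeString(b64)
		if err != nil {
			return "", "", fmt.Errorf("decode Base64 key: %w", err)
		}
		raw = string(decoded)
	default:
		b64 = base64.StdEncoding.EncodeToString([]byte(raw))
	}
	sum := md5.Sum([]byte(raw)) // MD5 of the decoded key bytes, as B2 expects
	return b64, base64.StdEncoding.EncodeToString(sum[:]), nil
}

func main() {
	key, sum, err := normalizeSSEKey("0123456789abcdef0123456789abcdef", "")
	if err != nil {
		panic(err)
	}
	fmt.Println(key, sum)
}
```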
````diff
@@ -1507,16 +1435,6 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
 		Name:         f.opt.Enc.FromStandardPath(dstPath),
 		DestBucketID: destBucketID,
 	}
-	if f.opt.SSECustomerKey != "" && f.opt.SSECustomerKeyMD5 != "" {
-		serverSideEncryptionConfig := api.ServerSideEncryption{
-			Mode:           "SSE-C",
-			Algorithm:      f.opt.SSECustomerAlgorithm,
-			CustomerKey:    f.opt.SSECustomerKeyBase64,
-			CustomerKeyMd5: f.opt.SSECustomerKeyMD5,
-		}
-		request.SourceServerSideEncryption = &serverSideEncryptionConfig
-		request.DestinationServerSideEncryption = &serverSideEncryptionConfig
-	}
 	if newInfo == nil {
 		request.MetadataDirective = "COPY"
 	} else {
@@ -1948,10 +1866,9 @@ var _ io.ReadCloser = &openFile{}
 
 func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
 	opts := rest.Opts{
 		Method:     method,
 		Options:    options,
 		NoResponse: method == "HEAD",
-		ExtraHeaders: map[string]string{},
 	}
 
 	// Use downloadUrl from backblaze if downloadUrl is not set
@@ -1969,11 +1886,6 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
 		bucket, bucketPath := o.split()
 		opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
 	}
-	if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
-		opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
-		opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
-		opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
-	}
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
 		return o.fs.shouldRetry(ctx, resp, err)
@@ -2238,11 +2150,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		},
 		ContentLength: &size,
 	}
-	if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
-		opts.ExtraHeaders[sseAlgorithmHeader] = o.fs.opt.SSECustomerAlgorithm
-		opts.ExtraHeaders[sseKeyHeader] = o.fs.opt.SSECustomerKeyBase64
-		opts.ExtraHeaders[sseMd5Header] = o.fs.opt.SSECustomerKeyMD5
-	}
 	var response api.FileInfo
 	// Don't retry, return a retry error instead
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -2334,10 +2241,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 func (o *Object) Remove(ctx context.Context) error {
 	bucket, bucketPath := o.split()
 	if o.fs.opt.Versions {
-		t, path := api.RemoveVersion(bucketPath)
-		if !t.IsZero() {
-			return o.fs.deleteByID(ctx, o.id, path)
-		}
+		return errNotWithVersions
 	}
 	if o.fs.opt.VersionAt.IsSet() {
 		return errNotWithVersionAt
````
````diff
@@ -2360,36 +2264,32 @@ func (o *Object) ID() string {
 
 var lifecycleHelp = fs.CommandHelp{
 	Name:  "lifecycle",
-	Short: "Read or set the lifecycle for a bucket.",
+	Short: "Read or set the lifecycle for a bucket",
 	Long: `This command can be used to read or set the lifecycle for a bucket.
 
+Usage Examples:
+
 To show the current lifecycle rules:
 
-` + "```console" + `
 rclone backend lifecycle b2:bucket
-` + "```" + `
 
 This will dump something like this showing the lifecycle rules.
 
-` + "```json" + `
 [
     {
         "daysFromHidingToDeleting": 1,
         "daysFromUploadingToHiding": null,
         "daysFromStartingToCancelingUnfinishedLargeFiles": null,
         "fileNamePrefix": ""
     }
 ]
-` + "```" + `
 
-If there are no lifecycle rules (the default) then it will just return ` + "`[]`" + `.
+If there are no lifecycle rules (the default) then it will just return [].
 
 To reset the current lifecycle rules:
 
-` + "```console" + `
 rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
 rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
-` + "```" + `
 
 This will run and then print the new lifecycle rules as above.
 
````
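The lifecycle-rules JSON shown in that help text can be reproduced with plain maps. A small sketch, not part of the diff, using only the field names that appear above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// One lifecycle rule, mirroring the example in the help text above.
	rules := []map[string]any{{
		"daysFromHidingToDeleting":                        1,
		"daysFromUploadingToHiding":                       nil,
		"daysFromStartingToCancelingUnfinishedLargeFiles": nil,
		"fileNamePrefix":                                  "",
	}}
	out, err := json.MarshalIndent(rules, "", "    ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```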
````diff
@@ -2401,17 +2301,14 @@ the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
 the config also which will mean deletions won't cause versions but
 overwrites will still cause versions to be made.
 
-` + "```console" + `
 rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
-` + "```" + `
 
-See: <https://www.backblaze.com/docs/cloud-storage-lifecycle-rules>`,
+See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
+`,
 	Opts: map[string]string{
-		"daysFromHidingToDeleting": `After a file has been hidden for this many days
-it is deleted. 0 is off.`,
-		"daysFromUploadingToHiding": `This many days after uploading a file is hidden.`,
-		"daysFromStartingToCancelingUnfinishedLargeFiles": `Cancels any unfinished
-large file versions after this many days.`,
+		"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
+		"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
+		"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
 	},
 }
 
@@ -2494,14 +2391,13 @@ max-age, which defaults to 24 hours.
 Note that you can use --interactive/-i or --dry-run with this command to see what
 it would do.
 
-` + "```console" + `
 rclone backend cleanup b2:bucket/path/to/object
 rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
-` + "```" + `
 
-Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
+Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
+`,
 	Opts: map[string]string{
-		"max-age": "Max age of upload to delete.",
+		"max-age": "Max age of upload to delete",
 	},
 }
 
@@ -2524,9 +2420,8 @@ var cleanupHiddenHelp = fs.CommandHelp{
 Note that you can use --interactive/-i or --dry-run with this command to see what
 it would do.
 
-` + "```console" + `
 rclone backend cleanup-hidden b2:bucket/path/to/dir
-` + "```",
+`,
 }
 
 func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
````
`backend/b2/upload.go`

````diff
@@ -144,14 +144,6 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		request.ContentType = newInfo.ContentType
 		request.Info = newInfo.Info
 	}
-	if o.fs.opt.SSECustomerKey != "" && o.fs.opt.SSECustomerKeyMD5 != "" {
-		request.ServerSideEncryption = &api.ServerSideEncryption{
-			Mode:           "SSE-C",
-			Algorithm:      o.fs.opt.SSECustomerAlgorithm,
-			CustomerKey:    o.fs.opt.SSECustomerKeyBase64,
-			CustomerKeyMd5: o.fs.opt.SSECustomerKeyMD5,
-		}
-	}
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_start_large_file",
@@ -303,12 +295,6 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
 		ContentLength: &sizeWithHash,
 	}
 
-	if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
-		opts.ExtraHeaders[sseAlgorithmHeader] = up.o.fs.opt.SSECustomerAlgorithm
-		opts.ExtraHeaders[sseKeyHeader] = up.o.fs.opt.SSECustomerKeyBase64
-		opts.ExtraHeaders[sseMd5Header] = up.o.fs.opt.SSECustomerKeyMD5
-	}
-
 	var response api.UploadPartResponse
 
 	resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
@@ -348,17 +334,6 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
 		PartNumber: int64(part + 1),
 		Range:      fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
 	}
 
-	if up.o.fs.opt.SSECustomerKey != "" && up.o.fs.opt.SSECustomerKeyMD5 != "" {
-		serverSideEncryptionConfig := api.ServerSideEncryption{
-			Mode:           "SSE-C",
-			Algorithm:      up.o.fs.opt.SSECustomerAlgorithm,
-			CustomerKey:    up.o.fs.opt.SSECustomerKeyBase64,
-			CustomerKeyMd5: up.o.fs.opt.SSECustomerKeyMD5,
-		}
-		request.SourceServerSideEncryption = &serverSideEncryptionConfig
-		request.DestinationServerSideEncryption = &serverSideEncryptionConfig
-	}
 	var response api.UploadPartResponse
 	resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
 	retry, err := up.f.shouldRetry(ctx, resp, err)
````
@@ -87,11 +87,13 @@ func init() {
|
|||||||
Description: "Box",
|
Description: "Box",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||||
|
jsonFile, ok := m.Get("box_config_file")
|
||||||
|
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
||||||
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
|
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
|
||||||
var err error
|
var err error
|
||||||
// If using box config.json, use JWT auth
|
// If using box config.json, use JWT auth
|
||||||
if usesJWTAuth(m) {
|
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
||||||
err = refreshJWTToken(ctx, name, m)
|
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
|
return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
|
||||||
}
|
}
|
||||||
@@ -112,11 +114,6 @@ func init() {
|
|||||||
}, {
|
}, {
|
||||||
Name: "box_config_file",
|
Name: "box_config_file",
|
||||||
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
|
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
|
||||||
}, {
|
|
||||||
Name: "config_credentials",
|
|
||||||
Help: "Box App config.json contents.\n\nLeave blank normally.",
|
|
||||||
Hide: fs.OptionHideBoth,
|
|
||||||
Sensitive: true,
|
|
||||||
}, {
|
}, {
|
||||||
Name: "access_token",
|
Name: "access_token",
|
||||||
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
||||||
@@ -187,17 +184,9 @@ See: https://developer.box.com/guides/authentication/jwt/as-user/
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func usesJWTAuth(m configmap.Mapper) bool {
|
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
|
||||||
jsonFile, okFile := m.Get("box_config_file")
|
jsonFile = env.ShellExpand(jsonFile)
|
||||||
jsonFileCredentials, okCredentials := m.Get("config_credentials")
|
boxConfig, err := getBoxConfig(jsonFile)
|
||||||
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
|
||||||
return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
|
|
||||||
boxSubType, _ := m.Get("box_sub_type")
|
|
||||||
|
|
||||||
boxConfig, err := getBoxConfig(m)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("get box config: %w", err)
|
return fmt.Errorf("get box config: %w", err)
|
||||||
}
|
}
|
||||||
@@ -216,19 +205,12 @@ func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func getBoxConfig(m configmap.Mapper) (boxConfig *api.ConfigJSON, err error) {
|
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
|
||||||
configFileCredentials, _ := m.Get("config_credentials")
|
file, err := os.ReadFile(configFile)
|
||||||
configFileBytes := []byte(configFileCredentials)
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
|
||||||
if configFileCredentials == "" {
|
|
||||||
configFile, _ := m.Get("box_config_file")
|
|
||||||
configFileBytes, err = os.ReadFile(configFile)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
err = json.Unmarshal(file, &boxConfig)
|
||||||
err = json.Unmarshal(configFileBytes, &boxConfig)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
|
return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
|
||||||
}
|
}
|
||||||
@@ -503,12 +485,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
f.srv.SetHeader("as-user", f.opt.Impersonate)
|
f.srv.SetHeader("as-user", f.opt.Impersonate)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
jsonFile, ok := m.Get("box_config_file")
|
||||||
|
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
||||||
|
|
||||||
if ts != nil {
|
if ts != nil {
|
||||||
// If using box config.json and JWT, renewing should just refresh the token and
|
// If using box config.json and JWT, renewing should just refresh the token and
|
||||||
// should do so whether there are uploads pending or not.
|
// should do so whether there are uploads pending or not.
|
||||||
if usesJWTAuth(m) {
|
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
|
||||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||||
err := refreshJWTToken(ctx, name, m)
|
err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
|
||||||
return err
|
return err
|
||||||
})
|
})
|
||||||
f.tokenRenewer.Start()
|
f.tokenRenewer.Start()
|
||||||
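Design note: the left side factors the JWT-configuration test into usesJWTAuth so the Config callback and NewFs share one predicate; the right side repeats the inline condition at both call sites. A minimal sketch of that shared-predicate pattern (simplified signature assumed, not the backend's code):

    package main

    import "fmt"

    func usesJWT(get func(key string) (string, bool)) bool {
        jsonFile, okFile := get("box_config_file")
        boxSubType, okSub := get("box_sub_type")
        return okFile && okSub && jsonFile != "" && boxSubType != ""
    }

    func main() {
        cfg := map[string]string{"box_config_file": "~/box.json", "box_sub_type": "user"}
        get := func(key string) (string, bool) { v, ok := cfg[key]; return v, ok }
        fmt.Println(usesJWT(get)) // true
    }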
@@ -2,8 +2,10 @@
 package compress
 
 import (
+	"bufio"
 	"bytes"
 	"context"
+	"crypto/md5"
 	"encoding/base64"
 	"encoding/binary"
 	"encoding/hex"
@@ -44,7 +46,6 @@ const (
 	minCompressionRatio = 1.1
 
 	gzFileExt           = ".gz"
-	zstdFileExt         = ".zst"
 	metaFileExt         = ".json"
 	uncompressedFileExt = ".bin"
 )
@@ -53,7 +54,6 @@ const (
 const (
 	Uncompressed = 0
 	Gzip         = 2
-	Zstd         = 4
 )
 
 var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`)
@@ -66,10 +66,6 @@ func init() {
 			Value: "gzip",
 			Help:  "Standard gzip compression with fastest parameters.",
 		},
-		{
-			Value: "zstd",
-			Help:  "Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.",
-		},
 	}
 
 	// Register our remote
@@ -91,23 +87,17 @@ func init() {
 			Examples: compressionModeOptions,
 		}, {
 			Name: "level",
-			Help: `GZIP (levels -2 to 9):
-- -2 — Huffman encoding only. Only use if you know what you're doing.
-- -1 (default) — recommended; equivalent to level 5.
-- 0 — turns off compression.
-- 1–9 — increase compression at the cost of speed. Going past 6 generally offers very little return.
-
-ZSTD (levels 0 to 4):
-- 0 — turns off compression entirely.
-- 1 — fastest compression with the lowest ratio.
-- 2 (default) — good balance of speed and compression.
-- 3 — better compression, but uses about 2–3x more CPU than the default.
-- 4 — best possible compression ratio (highest CPU cost).
-
-Notes:
-- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
-- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).`,
-			Required: true,
+			Help: `GZIP compression level (-2 to 9).
+
+Generally -1 (default, equivalent to 5) is recommended.
+Levels 1 to 9 increase compression at the cost of speed. Going past 6
+generally offers very little return.
+
+Level -2 uses Huffman encoding only. Only use if you know what you
+are doing.
+Level 0 turns off compression.`,
+			Default:  sgzip.DefaultCompression,
+			Advanced: true,
 		}, {
 			Name: "ram_cache_limit",
 			Help: `Some remotes don't allow the upload of files with unknown size.
@@ -122,47 +112,6 @@ this limit will be cached on disk.`,
 	})
 }
 
-// compressionModeHandler defines the interface for handling different compression modes
-type compressionModeHandler interface {
-	// processFileNameGetFileExtension returns the file extension for the given compression mode
-	processFileNameGetFileExtension(compressionMode int) string
-
-	// newObjectGetOriginalSize returns the original file size from the metadata
-	newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error)
-
-	// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
-	// the configured threshold
-	isCompressible(r io.Reader, compressionMode int) (bool, error)
-
-	// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
-	putCompress(
-		ctx context.Context,
-		f *Fs,
-		in io.Reader,
-		src fs.ObjectInfo,
-		options []fs.OpenOption,
-		mimeType string,
-	) (fs.Object, *ObjectMetadata, error)
-
-	// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
-	openGetReadCloser(
-		ctx context.Context,
-		o *Object,
-		offset int64,
-		limit int64,
-		cr chunkedreader.ChunkedReader,
-		closer io.Closer,
-		options ...fs.OpenOption,
-	) (rc io.ReadCloser, err error)
-
-	// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
-	putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error)
-
-	// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
-	// Warning: This function panics if cmeta is not of the expected type.
-	newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata
-}
-
 // Options defines the configuration for this backend
 type Options struct {
 	Remote string `config:"remote"`
@@ -176,13 +125,12 @@ type Options struct {
 // Fs represents a wrapped fs.Fs
 type Fs struct {
 	fs.Fs
 	wrapper  fs.Fs
 	name     string
 	root     string
 	opt      Options
 	mode     int          // compression mode id
 	features *fs.Features // optional features
-	modeHandler compressionModeHandler // compression mode handler
 }
 
 // NewFs constructs an Fs from the path, container:path
@@ -219,28 +167,13 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 		return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err)
 	}
 
-	compressionMode := compressionModeFromName(opt.CompressionMode)
-	var modeHandler compressionModeHandler
-
-	switch compressionMode {
-	case Gzip:
-		modeHandler = &gzipModeHandler{}
-	case Zstd:
-		modeHandler = &zstdModeHandler{}
-	case Uncompressed:
-		modeHandler = &uncompressedModeHandler{}
-	default:
-		modeHandler = &unknownModeHandler{}
-	}
-
 	// Create the wrapping fs
 	f := &Fs{
 		Fs:   wrappedFs,
 		name: name,
 		root: rpath,
 		opt:  *opt,
-		mode: compressionMode,
-		modeHandler: modeHandler,
+		mode: compressionModeFromName(opt.CompressionMode),
 	}
 	// Correct root if definitely pointing to a file
 	if err == fs.ErrorIsFile {
@@ -282,13 +215,10 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 	return f, err
 }
 
-// compressionModeFromName converts a compression mode name to its int representation.
 func compressionModeFromName(name string) int {
 	switch name {
 	case "gzip":
 		return Gzip
-	case "zstd":
-		return Zstd
 	default:
 		return Uncompressed
 	}
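Design note: the removed code picks a handler implementation once in NewFs and dispatches through the compressionModeHandler interface everywhere else (a classic strategy pattern). A stripped-down sketch of that dispatch, with stand-in types rather than the backend's real ones:

    package main

    import "fmt"

    type handler interface{ ext() string }

    type gzipHandler struct{}
    type uncompressedHandler struct{}

    func (gzipHandler) ext() string         { return ".gz" }
    func (uncompressedHandler) ext() string { return ".bin" }

    func handlerFor(mode string) handler {
        switch mode {
        case "gzip":
            return gzipHandler{}
        default:
            return uncompressedHandler{}
        }
    }

    func main() {
        h := handlerFor("gzip") // chosen once, e.g. in NewFs
        fmt.Println(h.ext())    // ".gz"
    }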
@@ -312,7 +242,7 @@ func base64ToInt64(str string) (int64, error) {
 
 // Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file.
 // Returns -2 for the original size if the file is uncompressed.
-func processFileName(compressedFileName string, modeHandler compressionModeHandler) (origFileName string, extension string, origSize int64, err error) {
+func processFileName(compressedFileName string) (origFileName string, extension string, origSize int64, err error) {
 	// Separate the filename and size from the extension
 	extensionPos := strings.LastIndex(compressedFileName, ".")
 	if extensionPos == -1 {
@@ -331,8 +261,7 @@ func processFileName(compressedFileName string, modeHandler compressionModeHandl
 	if err != nil {
 		return "", "", 0, errors.New("could not decode size")
 	}
-	ext := modeHandler.processFileNameGetFileExtension(compressionModeFromName(compressedFileName[extensionPos+1:]))
-	return match[1], ext, size, nil
+	return match[1], gzFileExt, size, nil
 }
 
 // Generates the file name for a metadata file
@@ -357,15 +286,11 @@ func unwrapMetadataFile(filename string) (string, bool) {
 
 // makeDataName generates the file name for a data file with specified compression mode
 func makeDataName(remote string, size int64, mode int) (newRemote string) {
-	switch mode {
-	case Gzip:
+	if mode != Uncompressed {
 		newRemote = remote + "." + int64ToBase64(size) + gzFileExt
-	case Zstd:
-		newRemote = remote + "." + int64ToBase64(size) + zstdFileExt
-	default:
+	} else {
 		newRemote = remote + uncompressedFileExt
 	}
 
 	return newRemote
 }
 
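The data-file name embeds the original size as an 11-character URL-safe base64 token, which is exactly what nameRegexp's `[A-Za-z0-9-_]{11}` group matches. A plausible sketch of the encode/decode round trip (the exact int64ToBase64/base64ToInt64 bodies are not shown in this diff, so this is an assumption):

    package main

    import (
        "encoding/base64"
        "encoding/binary"
        "fmt"
    )

    func int64ToBase64(n int64) string {
        var buf [8]byte
        binary.BigEndian.PutUint64(buf[:], uint64(n))
        return base64.RawURLEncoding.EncodeToString(buf[:]) // 8 bytes -> 11 chars
    }

    func base64ToInt64(s string) (int64, error) {
        b, err := base64.RawURLEncoding.DecodeString(s)
        if err != nil {
            return 0, err
        }
        return int64(binary.BigEndian.Uint64(b)), nil
    }

    func main() {
        tok := int64ToBase64(5560)
        n, _ := base64ToInt64(tok)
        fmt.Println(tok, n) // an 11-char token, then 5560
    }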
@@ -379,7 +304,7 @@ func (f *Fs) dataName(remote string, size int64, compressed bool) (name string)
 
 // addData parses an object and adds it to the DirEntries
 func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) {
-	origFileName, _, size, err := processFileName(o.Remote(), f.modeHandler)
+	origFileName, _, size, err := processFileName(o.Remote())
 	if err != nil {
 		fs.Errorf(o, "Error on parsing file name: %v", err)
 		return
@@ -502,12 +427,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, fmt.Errorf("error decoding metadata: %w", err)
 	}
-	size, err := f.modeHandler.newObjectGetOriginalSize(meta)
-	if err != nil {
-		return nil, fmt.Errorf("error reading metadata: %w", err)
-	}
 	// Create our Object
-	o, err := f.Fs.NewObject(ctx, makeDataName(remote, size, meta.Mode))
+	o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
 	if err != nil {
 		return nil, err
 	}
@@ -516,7 +437,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 
 // checkCompressAndType checks if an object is compressible and determines it's mime type
 // returns a multireader with the bytes that were read to determine mime type
-func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compressionModeHandler) (newReader io.Reader, compressible bool, mimeType string, err error) {
+func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool, mimeType string, err error) {
 	in, wrap := accounting.UnWrap(in)
 	buf := make([]byte, heuristicBytes)
 	n, err := in.Read(buf)
@@ -525,7 +446,7 @@ func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compres
 		return nil, false, "", err
 	}
 	mime := mimetype.Detect(buf)
-	compressible, err = modeHandler.isCompressible(bytes.NewReader(buf), compressionMode)
+	compressible, err = isCompressible(bytes.NewReader(buf))
 	if err != nil {
 		return nil, false, "", err
 	}
@@ -533,6 +454,26 @@ func checkCompressAndType(in io.Reader, compressionMode int, modeHandler compres
 	return wrap(in), compressible, mime.String(), nil
 }
 
+// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
+// the configured threshold
+func isCompressible(r io.Reader) (bool, error) {
+	var b bytes.Buffer
+	w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
+	if err != nil {
+		return false, err
+	}
+	n, err := io.Copy(w, r)
+	if err != nil {
+		return false, err
+	}
+	err = w.Close()
+	if err != nil {
+		return false, err
+	}
+	ratio := float64(n) / float64(b.Len())
+	return ratio > minCompressionRatio, nil
+}
+
 // verifyObjectHash verifies the Objects hash
 func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error {
 	srcHash := hasher.Sums()[ht]
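The heuristic above compresses only a small sample of the input and treats it as compressible when the input/output ratio exceeds minCompressionRatio (1.1). The same test sketched with the standard library's gzip instead of github.com/buengese/sgzip, as a self-contained illustration:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io"
        "strings"
    )

    const minCompressionRatio = 1.1

    func isCompressibleSample(r io.Reader) (bool, error) {
        var b bytes.Buffer
        w := gzip.NewWriter(&b)
        n, err := io.Copy(w, r)
        if err != nil {
            return false, err
        }
        if err := w.Close(); err != nil {
            return false, err
        }
        return float64(n)/float64(b.Len()) > minCompressionRatio, nil
    }

    func main() {
        ok, _ := isCompressibleSample(strings.NewReader(strings.Repeat("abcdef ", 1000)))
        fmt.Println(ok) // true: repetitive text compresses well
    }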
@@ -553,9 +494,9 @@ func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.Mul
 
 type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
 
-type compressionResult[T sgzip.GzipMetadata | SzstdMetadata] struct {
+type compressionResult struct {
 	err  error
-	meta T
+	meta sgzip.GzipMetadata
 }
 
 // replicating some of operations.Rcat functionality because we want to support remotes without streaming
@@ -596,18 +537,106 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		return nil, fmt.Errorf("failed to write temporary local file: %w", err)
 	}
 	if _, err = tempFile.Seek(0, 0); err != nil {
-		return nil, fmt.Errorf("failed to seek temporary local file: %w", err)
+		return nil, err
 	}
 	finfo, err := tempFile.Stat()
 	if err != nil {
-		return nil, fmt.Errorf("failed to stat temporary local file: %w", err)
+		return nil, err
 	}
 	return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs))
 }
 
 // Put a compressed version of a file. Returns a wrappable object and metadata.
 func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) {
-	return f.modeHandler.putCompress(ctx, f, in, src, options, mimeType)
+	// Unwrap reader accounting
+	in, wrap := accounting.UnWrap(in)
+
+	// Add the metadata hasher
+	metaHasher := md5.New()
+	in = io.TeeReader(in, metaHasher)
+
+	// Compress the file
+	pipeReader, pipeWriter := io.Pipe()
+	results := make(chan compressionResult)
+	go func() {
+		gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
+		if err != nil {
+			results <- compressionResult{err: err, meta: sgzip.GzipMetadata{}}
+			return
+		}
+		_, err = io.Copy(gz, in)
+		gzErr := gz.Close()
+		if gzErr != nil {
+			fs.Errorf(nil, "Failed to close compress: %v", gzErr)
+			if err == nil {
+				err = gzErr
+			}
+		}
+		closeErr := pipeWriter.Close()
+		if closeErr != nil {
+			fs.Errorf(nil, "Failed to close pipe: %v", closeErr)
+			if err == nil {
+				err = closeErr
+			}
+		}
+		results <- compressionResult{err: err, meta: gz.MetaData()}
+	}()
+	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has it's own buffering
+
+	// Find a hash the destination supports to compute a hash of
+	// the compressed data.
+	ht := f.Fs.Hashes().GetOne()
+	var hasher *hash.MultiHasher
+	var err error
+	if ht != hash.None {
+		// unwrap the accounting again
+		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
+		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
+		if err != nil {
+			return nil, nil, err
+		}
+		// add the hasher and re-wrap the accounting
+		wrappedIn = io.TeeReader(wrappedIn, hasher)
+		wrappedIn = wrap(wrappedIn)
+	}
+
+	// Transfer the data
+	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
+	//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
+	if err != nil {
+		if o != nil {
+			removeErr := o.Remove(ctx)
+			if removeErr != nil {
+				fs.Errorf(o, "Failed to remove partially transferred object: %v", err)
+			}
+		}
+		return nil, nil, err
+	}
+	// Check whether we got an error during compression
+	result := <-results
+	err = result.err
+	if err != nil {
+		if o != nil {
+			removeErr := o.Remove(ctx)
+			if removeErr != nil {
+				fs.Errorf(o, "Failed to remove partially compressed object: %v", err)
+			}
+		}
+		return nil, nil, err
+	}
+
+	// Generate metadata
+	meta := newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
+
+	// Check the hashes of the compressed data if we were comparing them
+	if ht != hash.None && hasher != nil {
+		err = f.verifyObjectHash(ctx, o, hasher, ht)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return o, meta, nil
 }
 
 // Put an uncompressed version of a file. Returns a wrappable object and metadata.
@@ -651,8 +680,7 @@ func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo,
 	if err != nil {
 		return nil, nil, err
 	}
-	return f.modeHandler.putUncompressGetNewMetadata(o, Uncompressed, hex.EncodeToString(sum), mimeType, sum)
+	return o, newMetadata(o.Size(), Uncompressed, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
 }
 
 // This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object.
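putCompress streams through io.Pipe: a goroutine compresses into the write end while the upload drains the read end, and the result channel carries the compressor's error and metadata out once the upload finishes. A minimal sketch of that pattern using standard gzip in place of sgzip (io.Discard stands in for the upload):

    package main

    import (
        "compress/gzip"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        pr, pw := io.Pipe()
        errs := make(chan error, 1)
        go func() {
            gz := gzip.NewWriter(pw)
            _, err := io.Copy(gz, strings.NewReader(strings.Repeat("data ", 1000)))
            if cerr := gz.Close(); err == nil {
                err = cerr
            }
            pw.CloseWithError(err) // unblocks the reader if compression failed
            errs <- err
        }()
        n, err := io.Copy(io.Discard, pr) // stands in for the upload
        if err == nil {
            err = <-errs // surface any compression error afterwards
        }
        fmt.Println(n, err)
    }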
@@ -723,7 +751,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	o, err := f.NewObject(ctx, src.Remote())
 	if err == fs.ErrorObjectNotFound {
 		// Get our file compressibility
-		in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
+		in, compressible, mimeType, err := checkCompressAndType(in)
 		if err != nil {
 			return nil, err
 		}
@@ -743,7 +771,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	}
 	found := err == nil
 
-	in, compressible, mimeType, err := checkCompressAndType(in, f.mode, f.modeHandler)
+	in, compressible, mimeType, err := checkCompressAndType(in)
 	if err != nil {
 		return nil, err
 	}
@@ -1062,12 +1090,11 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration
 
 // ObjectMetadata describes the metadata for an Object.
 type ObjectMetadata struct {
 	Mode     int    // Compression mode of the file.
 	Size     int64  // Size of the object.
 	MD5      string // MD5 hash of the file.
 	MimeType string // Mime type of the file
-	CompressionMetadataGzip *sgzip.GzipMetadata // Metadata for Gzip compression
-	CompressionMetadataZstd *SzstdMetadata      // Metadata for Zstd compression
+	CompressionMetadata sgzip.GzipMetadata
 }
 
 // Object with external metadata
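Each stored object gets a .json sidecar holding this struct. As an illustration only (stand-in types and made-up values, not a real sidecar), JSON-encoding the new shape of ObjectMetadata would produce something like the output of:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type gzipMetadata struct { // stand-in for sgzip.GzipMetadata
        Size int64
    }

    type objectMetadata struct {
        Mode                int
        Size                int64
        MD5                 string
        MimeType            string
        CompressionMetadata gzipMetadata
    }

    func main() {
        meta := objectMetadata{
            Mode:                2, // Gzip
            Size:                5560,
            MD5:                 "9e107d9d372bb6826bd81d3542a419d6", // hex, as in the diff
            MimeType:            "text/plain",
            CompressionMetadata: gzipMetadata{Size: 5560},
        }
        out, _ := json.Marshal(meta)
        fmt.Println(string(out))
    }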
@@ -1080,6 +1107,17 @@ type Object struct {
 	meta *ObjectMetadata // Metadata struct for this object (nil if not loaded)
 }
 
+// This function generates a metadata object
+func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mimeType string) *ObjectMetadata {
+	meta := new(ObjectMetadata)
+	meta.Size = size
+	meta.Mode = mode
+	meta.CompressionMetadata = cmeta
+	meta.MD5 = md5
+	meta.MimeType = mimeType
+	return meta
+}
+
 // This function will read the metadata from a metadata object.
 func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
 	// Open our meradata object
@@ -1127,7 +1165,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return o.mo, o.mo.Update(ctx, in, src, options...)
 	}
 
-	in, compressible, mimeType, err := checkCompressAndType(in, o.meta.Mode, o.f.modeHandler)
+	in, compressible, mimeType, err := checkCompressAndType(in)
 	if err != nil {
 		return err
 	}
@@ -1240,7 +1278,7 @@ func (o *Object) String() string {
 
 // Remote returns the remote path
 func (o *Object) Remote() string {
-	origFileName, _, _, err := processFileName(o.Object.Remote(), o.f.modeHandler)
+	origFileName, _, _, err := processFileName(o.Object.Remote())
 	if err != nil {
 		fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote())
 		return o.Object.Remote()
@@ -1343,6 +1381,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 		return o.Object.Open(ctx, options...)
 	}
 	// Get offset and limit from OpenOptions, pass the rest to the underlying remote
+	var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}}
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -1350,12 +1389,31 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			offset = x.Offset
 		case *fs.RangeOption:
 			offset, limit = x.Decode(o.Size())
+		default:
+			openOptions = append(openOptions, option)
 		}
 	}
 	// Get a chunkedreader for the wrapped object
 	chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize, chunkStreams)
-	var retCloser io.Closer = chunkedReader
-	return o.f.modeHandler.openGetReadCloser(ctx, o, offset, limit, chunkedReader, retCloser, options...)
+	// Get file handle
+	var file io.Reader
+	if offset != 0 {
+		file, err = sgzip.NewReaderAt(chunkedReader, &o.meta.CompressionMetadata, offset)
+	} else {
+		file, err = sgzip.NewReader(chunkedReader)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var fileReader io.Reader
+	if limit != -1 {
+		fileReader = io.LimitReader(file, limit)
+	} else {
+		fileReader = file
+	}
+	// Return a ReadCloser
+	return ReadCloserWrapper{Reader: fileReader, Closer: chunkedReader}, nil
 }
 
 // ObjectInfo describes a wrapped fs.ObjectInfo for being the source
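A range request is served here by seeking the decompressed stream to offset and then capping reads with io.LimitReader when limit is not -1. A self-contained sketch of that mapping; decodeRange only mimics the fs.RangeOption.Decode semantics assumed here (inclusive start/end, end < 0 meaning "to EOF"):

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    func decodeRange(start, end, size int64) (offset, limit int64) {
        offset = start
        if end < 0 || end >= size {
            return offset, -1
        }
        return offset, end - start + 1
    }

    func main() {
        data := "0123456789"
        offset, limit := decodeRange(2, 5, int64(len(data)))
        r := io.Reader(strings.NewReader(data[offset:])) // stands in for the seeked stream
        if limit != -1 {
            r = io.LimitReader(r, limit)
        }
        out, _ := io.ReadAll(r)
        fmt.Println(string(out)) // "2345"
    }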
@@ -48,27 +48,7 @@ func TestRemoteGzip(t *testing.T) {
 	opt.ExtraConfig = []fstests.ExtraConfigItem{
 		{Name: name, Key: "type", Value: "compress"},
 		{Name: name, Key: "remote", Value: tempdir},
-		{Name: name, Key: "mode", Value: "gzip"},
-		{Name: name, Key: "level", Value: "-1"},
-	}
-	opt.QuickTestOK = true
-	fstests.Run(t, &opt)
-}
-
-// TestRemoteZstd tests ZSTD compression
-func TestRemoteZstd(t *testing.T) {
-	if *fstest.RemoteName != "" {
-		t.Skip("Skipping as -remote set")
-	}
-	tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-zstd")
-	name := "TestCompressZstd"
-	opt := defaultOpt
-	opt.RemoteName = name + ":"
-	opt.ExtraConfig = []fstests.ExtraConfigItem{
-		{Name: name, Key: "type", Value: "compress"},
-		{Name: name, Key: "remote", Value: tempdir},
-		{Name: name, Key: "mode", Value: "zstd"},
-		{Name: name, Key: "level", Value: "2"},
+		{Name: name, Key: "compression_mode", Value: "gzip"},
 	}
 	opt.QuickTestOK = true
 	fstests.Run(t, &opt)
@@ -1,207 +0,0 @@
-package compress
-
-import (
-	"bufio"
-	"bytes"
-	"context"
-	"crypto/md5"
-	"encoding/hex"
-	"errors"
-	"io"
-
-	"github.com/buengese/sgzip"
-
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/chunkedreader"
-	"github.com/rclone/rclone/fs/hash"
-)
-
-// gzipModeHandler implements compressionModeHandler for gzip
-type gzipModeHandler struct{}
-
-// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
-// the configured threshold
-func (g *gzipModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
-	var b bytes.Buffer
-	var n int64
-	w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression)
-	if err != nil {
-		return false, err
-	}
-	n, err = io.Copy(w, r)
-	if err != nil {
-		return false, err
-	}
-	err = w.Close()
-	if err != nil {
-		return false, err
-	}
-	ratio := float64(n) / float64(b.Len())
-	return ratio > minCompressionRatio, nil
-}
-
-// newObjectGetOriginalSize returns the original file size from the metadata
-func (g *gzipModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
-	if meta.CompressionMetadataGzip == nil {
-		return 0, errors.New("missing gzip metadata")
-	}
-	return meta.CompressionMetadataGzip.Size, nil
-}
-
-// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
-func (g *gzipModeHandler) openGetReadCloser(
-	ctx context.Context,
-	o *Object,
-	offset int64,
-	limit int64,
-	cr chunkedreader.ChunkedReader,
-	closer io.Closer,
-	options ...fs.OpenOption,
-) (rc io.ReadCloser, err error) {
-	var file io.Reader
-
-	if offset != 0 {
-		file, err = sgzip.NewReaderAt(cr, o.meta.CompressionMetadataGzip, offset)
-	} else {
-		file, err = sgzip.NewReader(cr)
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	var fileReader io.Reader
-	if limit != -1 {
-		fileReader = io.LimitReader(file, limit)
-	} else {
-		fileReader = file
-	}
-	// Return a ReadCloser
-	return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
-}
-
-// processFileNameGetFileExtension returns the file extension for the given compression mode
-func (g *gzipModeHandler) processFileNameGetFileExtension(compressionMode int) string {
-	if compressionMode == Gzip {
-		return gzFileExt
-	}
-
-	return ""
-}
-
-// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
-func (g *gzipModeHandler) putCompress(
-	ctx context.Context,
-	f *Fs,
-	in io.Reader,
-	src fs.ObjectInfo,
-	options []fs.OpenOption,
-	mimeType string,
-) (fs.Object, *ObjectMetadata, error) {
-	// Unwrap reader accounting
-	in, wrap := accounting.UnWrap(in)
-
-	// Add the metadata hasher
-	metaHasher := md5.New()
-	in = io.TeeReader(in, metaHasher)
-
-	// Compress the file
-	pipeReader, pipeWriter := io.Pipe()
-
-	resultsGzip := make(chan compressionResult[sgzip.GzipMetadata])
-	go func() {
-		gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel)
-		if err != nil {
-			resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: sgzip.GzipMetadata{}}
-			close(resultsGzip)
-			return
-		}
-		_, err = io.Copy(gz, in)
-		gzErr := gz.Close()
-		if gzErr != nil && err == nil {
-			err = gzErr
-		}
-		closeErr := pipeWriter.Close()
-		if closeErr != nil && err == nil {
-			err = closeErr
-		}
-		resultsGzip <- compressionResult[sgzip.GzipMetadata]{err: err, meta: gz.MetaData()}
-		close(resultsGzip)
-	}()
-
-	wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has it's own buffering
-
-	// Find a hash the destination supports to compute a hash of
-	// the compressed data.
-	ht := f.Fs.Hashes().GetOne()
-	var hasher *hash.MultiHasher
-	var err error
-	if ht != hash.None {
-		// unwrap the accounting again
-		wrappedIn, wrap = accounting.UnWrap(wrappedIn)
-		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
-		if err != nil {
-			return nil, nil, err
-		}
-		// add the hasher and re-wrap the accounting
-		wrappedIn = io.TeeReader(wrappedIn, hasher)
-		wrappedIn = wrap(wrappedIn)
-	}
-
-	// Transfer the data
-	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
-	if err != nil {
-		if o != nil {
-			if removeErr := o.Remove(ctx); removeErr != nil {
-				fs.Errorf(o, "Failed to remove partially transferred object: %v", removeErr)
-			}
-		}
-		return nil, nil, err
-	}
-	// Check whether we got an error during compression
-	result := <-resultsGzip
-	if result.err != nil {
-		if o != nil {
-			if removeErr := o.Remove(ctx); removeErr != nil {
-				fs.Errorf(o, "Failed to remove partially compressed object: %v", removeErr)
-			}
-		}
-		return nil, nil, result.err
-	}
-
-	// Generate metadata
-	meta := g.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
-
-	// Check the hashes of the compressed data if we were comparing them
-	if ht != hash.None && hasher != nil {
-		err = f.verifyObjectHash(ctx, o, hasher, ht)
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-	return o, meta, nil
-}
-
-// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
-func (g *gzipModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
-	return o, g.newMetadata(o.Size(), mode, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil
-}
-
-// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
-// Warning: This function panics if cmeta is not of the expected type.
-func (g *gzipModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
-	meta, ok := cmeta.(sgzip.GzipMetadata)
-	if !ok {
-		panic("invalid cmeta type: expected sgzip.GzipMetadata")
-	}
-
-	objMeta := new(ObjectMetadata)
-	objMeta.Size = size
-	objMeta.Mode = mode
-	objMeta.CompressionMetadataGzip = &meta
-	objMeta.CompressionMetadataZstd = nil
-	objMeta.MD5 = md5
-	objMeta.MimeType = mimeType
-
-	return objMeta
-}
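The deleted handlers used a generic result type, compressionResult[T sgzip.GzipMetadata | SzstdMetadata], so the gzip and zstd paths could share one channel shape. A minimal sketch of that union-constrained type-parameter pattern with stand-in metadata types:

    package main

    import "fmt"

    type gzipMeta struct{ Size int64 } // stand-ins for the real metadata types
    type zstdMeta struct{ Size int64 }

    type result[T gzipMeta | zstdMeta] struct {
        err  error
        meta T
    }

    func main() {
        ch := make(chan result[gzipMeta], 1)
        ch <- result[gzipMeta]{meta: gzipMeta{Size: 42}}
        fmt.Println((<-ch).meta.Size) // 42
    }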
@@ -1,327 +0,0 @@
-package compress
-
-import (
-	"context"
-	"errors"
-	"io"
-	"runtime"
-	"sync"
-
-	szstd "github.com/a1ex3/zstd-seekable-format-go/pkg"
-	"github.com/klauspost/compress/zstd"
-)
-
-const szstdChunkSize int = 1 << 20 // 1 MiB chunk size
-
-// SzstdMetadata holds metadata for szstd compressed files.
-type SzstdMetadata struct {
-	BlockSize int      // BlockSize is the size of the blocks in the zstd file
-	Size      int64    // Size is the uncompressed size of the file
-	BlockData []uint32 // BlockData is the block data for the zstd file, used for seeking
-}
-
-// SzstdWriter is a writer that compresses data in szstd format.
-type SzstdWriter struct {
-	enc      *zstd.Encoder
-	w        szstd.ConcurrentWriter
-	metadata SzstdMetadata
-	mu       sync.Mutex
-}
-
-// NewWriterSzstd creates a new szstd writer with the specified options.
-// It initializes the szstd writer with a zstd encoder and returns a pointer to the SzstdWriter.
-// The writer can be used to write data in chunks, and it will automatically handle block sizes and metadata.
-func NewWriterSzstd(w io.Writer, opts ...zstd.EOption) (*SzstdWriter, error) {
-	encoder, err := zstd.NewWriter(nil, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	sw, err := szstd.NewWriter(w, encoder)
-	if err != nil {
-		if err := encoder.Close(); err != nil {
-			return nil, err
-		}
-		return nil, err
-	}
-
-	return &SzstdWriter{
-		enc: encoder,
-		w:   sw,
-		metadata: SzstdMetadata{
-			BlockSize: szstdChunkSize,
-			Size:      0,
-		},
-	}, nil
-}
-
-// Write writes data to the szstd writer in chunks of szstdChunkSize.
-// It handles the block size and metadata updates automatically.
-func (w *SzstdWriter) Write(p []byte) (int, error) {
-	if len(p) == 0 {
-		return 0, nil
-	}
-
-	if w.metadata.BlockData == nil {
-		numBlocks := (len(p) + w.metadata.BlockSize - 1) / w.metadata.BlockSize
-		w.metadata.BlockData = make([]uint32, 1, numBlocks+1)
-		w.metadata.BlockData[0] = 0
-	}
-
-	start := 0
-	total := len(p)
-
-	var writerFunc szstd.FrameSource = func() ([]byte, error) {
-		if start >= total {
-			return nil, nil
-		}
-
-		end := min(start+w.metadata.BlockSize, total)
-		chunk := p[start:end]
-		size := end - start
-
-		w.mu.Lock()
-		w.metadata.Size += int64(size)
-		w.mu.Unlock()
-
-		start = end
-		return chunk, nil
-	}
-
-	// write sizes of compressed blocks in the callback
-	err := w.w.WriteMany(context.Background(), writerFunc,
-		szstd.WithWriteCallback(func(size uint32) {
-			w.mu.Lock()
-			lastOffset := w.metadata.BlockData[len(w.metadata.BlockData)-1]
-			w.metadata.BlockData = append(w.metadata.BlockData, lastOffset+size)
-			w.mu.Unlock()
-		}),
-	)
-	if err != nil {
-		return 0, err
-	}
-
-	return total, nil
-}
-
-// Close closes the SzstdWriter and its underlying encoder.
-func (w *SzstdWriter) Close() error {
-	if err := w.w.Close(); err != nil {
-		return err
-	}
-	if err := w.enc.Close(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// GetMetadata returns the metadata of the szstd writer.
-func (w *SzstdWriter) GetMetadata() SzstdMetadata {
-	return w.metadata
-}
-
-// SzstdReaderAt is a reader that allows random access in szstd compressed data.
-type SzstdReaderAt struct {
-	r        szstd.Reader
-	decoder  *zstd.Decoder
-	metadata *SzstdMetadata
-	pos      int64
-	mu       sync.Mutex
-}
-
-// NewReaderAtSzstd creates a new SzstdReaderAt at the specified io.ReadSeeker.
-func NewReaderAtSzstd(rs io.ReadSeeker, meta *SzstdMetadata, offset int64, opts ...zstd.DOption) (*SzstdReaderAt, error) {
-	decoder, err := zstd.NewReader(nil, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	r, err := szstd.NewReader(rs, decoder)
-	if err != nil {
-		decoder.Close()
-		return nil, err
-	}
-
-	sr := &SzstdReaderAt{
-		r:        r,
-		decoder:  decoder,
-		metadata: meta,
-		pos:      0,
-	}
-
-	// Set initial position to the provided offset
-	if _, err := sr.Seek(offset, io.SeekStart); err != nil {
-		if err := sr.Close(); err != nil {
-			return nil, err
-		}
-		return nil, err
-	}
-
-	return sr, nil
-}
-
-// Seek sets the offset for the next Read.
-func (s *SzstdReaderAt) Seek(offset int64, whence int) (int64, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	pos, err := s.r.Seek(offset, whence)
-	if err == nil {
-		s.pos = pos
-	}
-	return pos, err
-}
-
-func (s *SzstdReaderAt) Read(p []byte) (int, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	n, err := s.r.Read(p)
-	if err == nil {
-		s.pos += int64(n)
-	}
-	return n, err
-}
-
-// ReadAt reads data at the specified offset.
-func (s *SzstdReaderAt) ReadAt(p []byte, off int64) (int, error) {
-	if off < 0 {
-		return 0, errors.New("invalid offset")
-	}
-	if off >= s.metadata.Size {
-		return 0, io.EOF
-	}
-
-	endOff := min(off+int64(len(p)), s.metadata.Size)
-
-	// Find all blocks covered by the range
-	type blockInfo struct {
-		index         int   // Block index
-		offsetInBlock int64 // Offset within the block for starting reading
-		bytesToRead   int64 // How many bytes to read from this block
-	}
-
-	var blocks []blockInfo
-	uncompressedOffset := int64(0)
-	currentOff := off
-
-	for i := 0; i < len(s.metadata.BlockData)-1; i++ {
-		blockUncompressedEnd := min(uncompressedOffset+int64(s.metadata.BlockSize), s.metadata.Size)
-
-		if currentOff < blockUncompressedEnd && endOff > uncompressedOffset {
-			offsetInBlock := max(0, currentOff-uncompressedOffset)
-			bytesToRead := min(blockUncompressedEnd-uncompressedOffset-offsetInBlock, endOff-currentOff)
-
-			blocks = append(blocks, blockInfo{
-				index:         i,
-				offsetInBlock: offsetInBlock,
-				bytesToRead:   bytesToRead,
-			})
-
-			currentOff += bytesToRead
-			if currentOff >= endOff {
-				break
-			}
-		}
-		uncompressedOffset = blockUncompressedEnd
-	}
-
-	if len(blocks) == 0 {
-		return 0, io.EOF
-	}
-
-	// Parallel block decoding
-	type decodeResult struct {
-		index int
-		data  []byte
-		err   error
-	}
-
-	resultCh := make(chan decodeResult, len(blocks))
-	var wg sync.WaitGroup
-	sem := make(chan struct{}, runtime.NumCPU())
-
-	for _, block := range blocks {
-		wg.Add(1)
-		go func(block blockInfo) {
-			defer wg.Done()
-			sem <- struct{}{}
-			defer func() { <-sem }()
-
-			startOffset := int64(s.metadata.BlockData[block.index])
-			endOffset := int64(s.metadata.BlockData[block.index+1])
-			compressedSize := endOffset - startOffset
-
-			compressed := make([]byte, compressedSize)
-			_, err := s.r.ReadAt(compressed, startOffset)
-			if err != nil && err != io.EOF {
-				resultCh <- decodeResult{index: block.index, err: err}
-				return
-			}
-
-			decoded, err := s.decoder.DecodeAll(compressed, nil)
-			if err != nil {
-				resultCh <- decodeResult{index: block.index, err: err}
-				return
-			}
-
-			resultCh <- decodeResult{index: block.index, data: decoded, err: nil}
-		}(block)
-	}
-
-	go func() {
-		wg.Wait()
-		close(resultCh)
-	}()
-
-	// Collect results in block index order
-	totalRead := 0
-	results := make(map[int]decodeResult)
-	expected := len(blocks)
-	minIndex := blocks[0].index
-
-	for res := range resultCh {
-		results[res.index] = res
-		for {
-			if result, ok := results[minIndex]; ok {
-				if result.err != nil {
-					return 0, result.err
-				}
-				// find the corresponding blockInfo
-				var blk blockInfo
-				for _, b := range blocks {
-					if b.index == result.index {
-						blk = b
-						break
-					}
-				}
-
-				start := blk.offsetInBlock
-				end := start + blk.bytesToRead
-				copy(p[totalRead:totalRead+int(blk.bytesToRead)], result.data[start:end])
-				totalRead += int(blk.bytesToRead)
-				minIndex++
-				if minIndex-blocks[0].index >= len(blocks) {
-					break
-				}
-			} else {
-				break
-			}
-		}
-		if len(results) == expected && minIndex-blocks[0].index >= len(blocks) {
-			break
-		}
-	}
-
-	return totalRead, nil
-}
-
-// Close closes the SzstdReaderAt and underlying decoder.
-func (s *SzstdReaderAt) Close() error {
-	if err := s.r.Close(); err != nil {
-		return err
-	}
-	s.decoder.Close()
-	return nil
-}
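The deleted seekable format keeps one cumulative compressed offset per fixed-size uncompressed block, so a random read needs only integer arithmetic to find which compressed bytes to fetch. A small sketch of that lookup (offset values are illustrative, not from a real file):

    package main

    import "fmt"

    // blockFor maps an uncompressed position to its block index and the
    // compressed byte range recorded in a BlockData-style offset table.
    func blockFor(off int64, blockSize int, blockData []uint32) (index int, start, end uint32) {
        index = int(off / int64(blockSize))
        return index, blockData[index], blockData[index+1]
    }

    func main() {
        blockData := []uint32{0, 400, 730, 1100} // compressed offsets of 3 blocks
        idx, start, end := blockFor(2_500_000, 1<<20, blockData)
        fmt.Printf("block %d: compressed bytes [%d, %d)\n", idx, start, end) // block 2: [730, 1100)
    }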
@@ -1,65 +0,0 @@
-package compress
-
-import (
-	"context"
-	"fmt"
-	"io"
-
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/chunkedreader"
-)
-
-// uncompressedModeHandler implements compressionModeHandler for uncompressed files
-type uncompressedModeHandler struct{}
-
-// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
-// the configured threshold
-func (u *uncompressedModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
-	return false, nil
-}
-
-// newObjectGetOriginalSize returns the original file size from the metadata
-func (u *uncompressedModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
-	return 0, nil
-}
-
-// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
-func (u *uncompressedModeHandler) openGetReadCloser(
-	ctx context.Context,
-	o *Object,
-	offset int64,
-	limit int64,
-	cr chunkedreader.ChunkedReader,
-	closer io.Closer,
-	options ...fs.OpenOption,
-) (rc io.ReadCloser, err error) {
-	return o.Object.Open(ctx, options...)
-}
-
-// processFileNameGetFileExtension returns the file extension for the given compression mode
-func (u *uncompressedModeHandler) processFileNameGetFileExtension(compressionMode int) string {
-	return ""
-}
-
-// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
-func (u *uncompressedModeHandler) putCompress(
-	ctx context.Context,
-	f *Fs,
-	in io.Reader,
-	src fs.ObjectInfo,
-	options []fs.OpenOption,
-	mimeType string,
-) (fs.Object, *ObjectMetadata, error) {
-	return nil, nil, fmt.Errorf("unsupported compression mode %d", f.mode)
-}
-
-// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
-func (u *uncompressedModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
-	return nil, nil, fmt.Errorf("unsupported compression mode %d", Uncompressed)
-}
-
-// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
-// Warning: This function panics if cmeta is not of the expected type.
-func (u *uncompressedModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
-	return nil
-}
@@ -1,65 +0,0 @@
-package compress
-
-import (
-	"context"
-	"fmt"
-	"io"
-
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/chunkedreader"
-)
-
-// unknownModeHandler implements compressionModeHandler for unknown compression types
-type unknownModeHandler struct{}
-
-// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
-// the configured threshold
-func (unk *unknownModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
-	return false, fmt.Errorf("unknown compression mode %d", compressionMode)
-}
-
-// newObjectGetOriginalSize returns the original file size from the metadata
-func (unk *unknownModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
-	return 0, nil
-}
-
-// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
-func (unk *unknownModeHandler) openGetReadCloser(
-	ctx context.Context,
-	o *Object,
-	offset int64,
-	limit int64,
-	cr chunkedreader.ChunkedReader,
-	closer io.Closer,
-	options ...fs.OpenOption,
-) (rc io.ReadCloser, err error) {
-	return nil, fmt.Errorf("unknown compression mode %d", o.meta.Mode)
-}
-
-// processFileNameGetFileExtension returns the file extension for the given compression mode
-func (unk *unknownModeHandler) processFileNameGetFileExtension(compressionMode int) string {
-	return ""
-}
-
-// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
-func (unk *unknownModeHandler) putCompress(
-	ctx context.Context,
-	f *Fs,
-	in io.Reader,
-	src fs.ObjectInfo,
-	options []fs.OpenOption,
-	mimeType string,
-) (fs.Object, *ObjectMetadata, error) {
-	return nil, nil, fmt.Errorf("unknown compression mode %d", f.mode)
-}
-
-// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
-func (unk *unknownModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
-	return nil, nil, fmt.Errorf("unknown compression mode")
-}
-
-// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
-// Warning: This function panics if cmeta is not of the expected type.
-func (unk *unknownModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
-	return nil
-}
@@ -1,192 +0,0 @@
-package compress
-
-import (
-    "bufio"
-    "bytes"
-    "context"
-    "crypto/md5"
-    "encoding/hex"
-    "errors"
-    "io"
-
-    "github.com/klauspost/compress/zstd"
-
-    "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/accounting"
-    "github.com/rclone/rclone/fs/chunkedreader"
-    "github.com/rclone/rclone/fs/hash"
-)
-
-// zstdModeHandler implements compressionModeHandler for zstd
-type zstdModeHandler struct{}
-
-// isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds
-// the configured threshold
-func (z *zstdModeHandler) isCompressible(r io.Reader, compressionMode int) (bool, error) {
-    var b bytes.Buffer
-    var n int64
-    w, err := NewWriterSzstd(&b, zstd.WithEncoderLevel(zstd.SpeedDefault))
-    if err != nil {
-        return false, err
-    }
-    n, err = io.Copy(w, r)
-    if err != nil {
-        return false, err
-    }
-    err = w.Close()
-    if err != nil {
-        return false, err
-    }
-    ratio := float64(n) / float64(b.Len())
-    return ratio > minCompressionRatio, nil
-}
-
-// newObjectGetOriginalSize returns the original file size from the metadata
-func (z *zstdModeHandler) newObjectGetOriginalSize(meta *ObjectMetadata) (int64, error) {
-    if meta.CompressionMetadataZstd == nil {
-        return 0, errors.New("missing zstd metadata")
-    }
-    return meta.CompressionMetadataZstd.Size, nil
-}
-
-// openGetReadCloser opens a compressed object and returns a ReadCloser in the Open method
-func (z *zstdModeHandler) openGetReadCloser(
-    ctx context.Context,
-    o *Object,
-    offset int64,
-    limit int64,
-    cr chunkedreader.ChunkedReader,
-    closer io.Closer,
-    options ...fs.OpenOption,
-) (rc io.ReadCloser, err error) {
-    var file io.Reader
-
-    if offset != 0 {
-        file, err = NewReaderAtSzstd(cr, o.meta.CompressionMetadataZstd, offset)
-    } else {
-        file, err = zstd.NewReader(cr)
-    }
-    if err != nil {
-        return nil, err
-    }
-
-    var fileReader io.Reader
-    if limit != -1 {
-        fileReader = io.LimitReader(file, limit)
-    } else {
-        fileReader = file
-    }
-    // Return a ReadCloser
-    return ReadCloserWrapper{Reader: fileReader, Closer: closer}, nil
-}
-
-// processFileNameGetFileExtension returns the file extension for the given compression mode
-func (z *zstdModeHandler) processFileNameGetFileExtension(compressionMode int) string {
-    if compressionMode == Zstd {
-        return zstdFileExt
-    }
-
-    return ""
-}
-
-// putCompress compresses the input data and uploads it to the remote, returning the new object and its metadata
-func (z *zstdModeHandler) putCompress(
-    ctx context.Context,
-    f *Fs,
-    in io.Reader,
-    src fs.ObjectInfo,
-    options []fs.OpenOption,
-    mimeType string,
-) (fs.Object, *ObjectMetadata, error) {
-    // Unwrap reader accounting
-    in, wrap := accounting.UnWrap(in)
-
-    // Add the metadata hasher
-    metaHasher := md5.New()
-    in = io.TeeReader(in, metaHasher)
-
-    // Compress the file
-    pipeReader, pipeWriter := io.Pipe()
-
-    resultsZstd := make(chan compressionResult[SzstdMetadata])
-    go func() {
-        writer, err := NewWriterSzstd(pipeWriter, zstd.WithEncoderLevel(zstd.EncoderLevel(f.opt.CompressionLevel)))
-        if err != nil {
-            resultsZstd <- compressionResult[SzstdMetadata]{err: err}
-            close(resultsZstd)
-            return
-        }
-        _, err = io.Copy(writer, in)
-        if wErr := writer.Close(); wErr != nil && err == nil {
-            err = wErr
-        }
-        if cErr := pipeWriter.Close(); cErr != nil && err == nil {
-            err = cErr
-        }
-
-        resultsZstd <- compressionResult[SzstdMetadata]{err: err, meta: writer.GetMetadata()}
-        close(resultsZstd)
-    }()
-
-    wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize))
-
-    ht := f.Fs.Hashes().GetOne()
-    var hasher *hash.MultiHasher
-    var err error
-    if ht != hash.None {
-        wrappedIn, wrap = accounting.UnWrap(wrappedIn)
-        hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
-        if err != nil {
-            return nil, nil, err
-        }
-        wrappedIn = io.TeeReader(wrappedIn, hasher)
-        wrappedIn = wrap(wrappedIn)
-    }
-
-    o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    result := <-resultsZstd
-    if result.err != nil {
-        if o != nil {
-            _ = o.Remove(ctx)
-        }
-        return nil, nil, result.err
-    }
-
-    // Build metadata using uncompressed size for filename
-    meta := z.newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType)
-    if ht != hash.None && hasher != nil {
-        err = f.verifyObjectHash(ctx, o, hasher, ht)
-        if err != nil {
-            return nil, nil, err
-        }
-    }
-    return o, meta, nil
-}
-
-// putUncompressGetNewMetadata returns metadata in the putUncompress method for a specific compression algorithm
-func (z *zstdModeHandler) putUncompressGetNewMetadata(o fs.Object, mode int, md5 string, mimeType string, sum []byte) (fs.Object, *ObjectMetadata, error) {
-    return o, z.newMetadata(o.Size(), mode, SzstdMetadata{}, hex.EncodeToString(sum), mimeType), nil
-}
-
-// This function generates a metadata object for sgzip.GzipMetadata or SzstdMetadata.
-// Warning: This function panics if cmeta is not of the expected type.
-func (z *zstdModeHandler) newMetadata(size int64, mode int, cmeta any, md5 string, mimeType string) *ObjectMetadata {
-    meta, ok := cmeta.(SzstdMetadata)
-    if !ok {
-        panic("invalid cmeta type: expected SzstdMetadata")
-    }
-
-    objMeta := new(ObjectMetadata)
-    objMeta.Size = size
-    objMeta.Mode = mode
-    objMeta.CompressionMetadataGzip = nil
-    objMeta.CompressionMetadataZstd = &meta
-    objMeta.MD5 = md5
-    objMeta.MimeType = mimeType
-
-    return objMeta
-}

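The deleted zstd handler's putCompress compresses on the fly by pairing io.Pipe with a background goroutine, so the upload can stream while compression runs. Below is a minimal, self-contained sketch of that pattern using only github.com/klauspost/compress/zstd; the helper name compressThrough and the stand-in "upload" are illustrative assumptions, not rclone or sgzip APIs.

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/klauspost/compress/zstd"
)

// compressThrough returns a reader that yields the zstd-compressed form
// of src, plus a channel delivering the compressor goroutine's error.
func compressThrough(src io.Reader) (io.Reader, <-chan error) {
	pr, pw := io.Pipe()
	errc := make(chan error, 1)
	go func() {
		enc, err := zstd.NewWriter(pw, zstd.WithEncoderLevel(zstd.SpeedDefault))
		if err == nil {
			_, err = io.Copy(enc, src)
			if cerr := enc.Close(); err == nil {
				err = cerr
			}
		}
		pw.CloseWithError(err) // unblocks the read side on failure
		errc <- err
	}()
	return pr, errc
}

func main() {
	r, errc := compressThrough(strings.NewReader("some data to compress"))
	n, err := io.Copy(io.Discard, r) // stand-in for the real upload
	if err == nil {
		err = <-errc // surface any compression error after the copy
	}
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("uploaded %d compressed bytes\n", n)
}
```
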
@@ -923,30 +923,28 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 var commandHelp = []fs.CommandHelp{
     {
         Name:  "encode",
-        Short: "Encode the given filename(s).",
+        Short: "Encode the given filename(s)",
         Long: `This encodes the filenames given as arguments returning a list of
 strings of the encoded results.

-Usage examples:
+Usage Example:

-` + "```console" + `
-rclone backend encode crypt: file1 [file2...]
-rclone rc backend/command command=encode fs=crypt: file1 [file2...]
-` + "```",
+    rclone backend encode crypt: file1 [file2...]
+    rclone rc backend/command command=encode fs=crypt: file1 [file2...]
+`,
     },
     {
         Name:  "decode",
-        Short: "Decode the given filename(s).",
+        Short: "Decode the given filename(s)",
         Long: `This decodes the filenames given as arguments returning a list of
 strings of the decoded results. It will return an error if any of the
 inputs are invalid.

-Usage examples:
+Usage Example:

-` + "```console" + `
-rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
-rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
-` + "```",
+    rclone backend decode crypt: encryptedfile1 [encryptedfile2...]
+    rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]
+`,
     },
 }

@@ -563,26 +563,21 @@ var commandHelp = []fs.CommandHelp{{
     Short: "Show metadata about the DOI.",
     Long: `This command returns a JSON object with some information about the DOI.

-Usage example:
+    rclone backend medatadata doi:

-` + "```console" + `
-rclone backend metadata doi:
-` + "```" + `
-
-It returns a JSON object representing metadata about the DOI.`,
+It returns a JSON object representing metadata about the DOI.
+`,
 }, {
     Name:  "set",
     Short: "Set command for updating the config parameters.",
     Long: `This set command can be used to update the config parameters
 for a running doi backend.

-Usage examples:
+Usage Examples:

-` + "```console" + `
-rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI
-` + "```" + `
+    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI

 The option keys are named as they are in the config file.

@@ -590,7 +585,8 @@ This rebuilds the connection to the doi backend when it is called with
 the new parameters. Only new parameters need be passed as the values
 will default to those currently in use.

-It doesn't return anything.`,
+It doesn't return anything.
+`,
 }}

 // Command the backend to run a named command

@@ -733,17 +733,6 @@ two accounts.
     Advanced: true,
     Default:  rwOff,
     Examples: rwExamples,
-}, {
-    Name: "metadata_enforce_expansive_access",
-    Help: `Whether the request should enforce expansive access rules.
-
-From Feb 2026 this flag will be set by default so this flag can be used for
-testing before then.
-
-See: https://developers.google.com/workspace/drive/api/guides/limited-expansive-access
-`,
-    Advanced: true,
-    Default:  false,
 }, {
     Name: config.ConfigEncoding,
     Help: config.ConfigEncodingHelp,

@@ -823,7 +812,6 @@ type Options struct {
     MetadataOwner          rwChoice             `config:"metadata_owner"`
     MetadataPermissions    rwChoice             `config:"metadata_permissions"`
     MetadataLabels         rwChoice             `config:"metadata_labels"`
-    EnforceExpansiveAccess bool                 `config:"metadata_enforce_expansive_access"`
     Enc                    encoder.MultiEncoder `config:"encoding"`
     EnvAuth                bool                 `config:"env_auth"`
 }

@@ -3104,7 +3092,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
     _, err = f.svc.Permissions.Create(id, permission).
         Fields("").
         SupportsAllDrives(true).
-        EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
         Context(ctx).Do()
     return f.shouldRetry(ctx, err)
 })

@@ -3677,47 +3664,41 @@ func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error)

 var commandHelp = []fs.CommandHelp{{
     Name:  "get",
-    Short: "Get command for fetching the drive config parameters.",
-    Long: `This is a get command which will be used to fetch the various drive config
-parameters.
+    Short: "Get command for fetching the drive config parameters",
+    Long: `This is a get command which will be used to fetch the various drive config parameters

-Usage examples:
+Usage Examples:

-` + "```console" + `
-rclone backend get drive: [-o service_account_file] [-o chunk_size]
-rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
-` + "```",
+    rclone backend get drive: [-o service_account_file] [-o chunk_size]
+    rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
+`,
     Opts: map[string]string{
-        "chunk_size":           "Show the current upload chunk size.",
-        "service_account_file": "Show the current service account file.",
+        "chunk_size":           "show the current upload chunk size",
+        "service_account_file": "show the current service account file",
     },
 }, {
     Name:  "set",
-    Short: "Set command for updating the drive config parameters.",
-    Long: `This is a set command which will be used to update the various drive config
-parameters.
+    Short: "Set command for updating the drive config parameters",
+    Long: `This is a set command which will be used to update the various drive config parameters

-Usage examples:
+Usage Examples:

-` + "```console" + `
-rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
-rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
-` + "```",
+    rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+    rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
+`,
     Opts: map[string]string{
-        "chunk_size":           "Update the current upload chunk size.",
-        "service_account_file": "Update the current service account file.",
+        "chunk_size":           "update the current upload chunk size",
+        "service_account_file": "update the current service account file",
     },
 }, {
     Name:  "shortcut",
-    Short: "Create shortcuts from files or directories.",
+    Short: "Create shortcuts from files or directories",
     Long: `This command creates shortcuts from files or directories.

-Usage examples:
+Usage:

-` + "```console" + `
-rclone backend shortcut drive: source_item destination_shortcut
-rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut
-` + "```" + `
+    rclone backend shortcut drive: source_item destination_shortcut
+    rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut

 In the first example this creates a shortcut from the "source_item"
 which can be a file or a directory to the "destination_shortcut". The

@@ -3727,100 +3708,90 @@ from "drive:"
 In the second example this creates a shortcut from the "source_item"
 relative to "drive:" to the "destination_shortcut" relative to
 "drive2:". This may fail with a permission error if the user
-authenticated with "drive2:" can't read files from "drive:".`,
+authenticated with "drive2:" can't read files from "drive:".
+`,
     Opts: map[string]string{
-        "target": "Optional target remote for the shortcut destination.",
+        "target": "optional target remote for the shortcut destination",
     },
 }, {
     Name:  "drives",
-    Short: "List the Shared Drives available to this account.",
+    Short: "List the Shared Drives available to this account",
     Long: `This command lists the Shared Drives (Team Drives) available to this
 account.

-Usage example:
+Usage:

-` + "```console" + `
-rclone backend [-o config] drives drive:
-` + "```" + `
+    rclone backend [-o config] drives drive:

-This will return a JSON list of objects like this:
+This will return a JSON list of objects like this

-` + "```json" + `
-[
-    {
-        "id": "0ABCDEF-01234567890",
-        "kind": "drive#teamDrive",
-        "name": "My Drive"
-    },
-    {
-        "id": "0ABCDEFabcdefghijkl",
-        "kind": "drive#teamDrive",
-        "name": "Test Drive"
-    }
-]
-` + "```" + `
+    [
+    {
+        "id": "0ABCDEF-01234567890",
+        "kind": "drive#teamDrive",
+        "name": "My Drive"
+    },
+    {
+        "id": "0ABCDEFabcdefghijkl",
+        "kind": "drive#teamDrive",
+        "name": "Test Drive"
+    }
+    ]

 With the -o config parameter it will output the list in a format
 suitable for adding to a config file to make aliases for all the
 drives found and a combined drive.

-` + "```ini" + `
-[My Drive]
-type = alias
-remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
-
-[Test Drive]
-type = alias
-remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
-
-[AllDrives]
-type = combine
-upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
-` + "```" + `
+    [My Drive]
+    type = alias
+    remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
+
+    [Test Drive]
+    type = alias
+    remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
+
+    [AllDrives]
+    type = combine
+    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"

 Adding this to the rclone config file will cause those team drives to
 be accessible with the aliases shown. Any illegal characters will be
 substituted with "_" and duplicate names will have numbers suffixed.
 It will also add a remote called AllDrives which shows all the shared
-drives combined into one directory tree.`,
+drives combined into one directory tree.
+`,
 }, {
     Name:  "untrash",
-    Short: "Untrash files and directories.",
+    Short: "Untrash files and directories",
     Long: `This command untrashes all the files and directories in the directory
 passed in recursively.

-Usage example:
+Usage:

-` + "```console" + `
-rclone backend untrash drive:directory
-rclone backend --interactive untrash drive:directory subdir
-` + "```" + `
-
 This takes an optional directory to trash which make this easier to
 use via the API.

-Use the --interactive/-i or --dry-run flag to see what would be restored before
-restoring it.
+    rclone backend untrash drive:directory
+    rclone backend --interactive untrash drive:directory subdir
+
+Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.

 Result:

-` + "```json" + `
-{
-    "Untrashed": 17,
-    "Errors": 0
-}
-` + "```",
+    {
+        "Untrashed": 17,
+        "Errors": 0
+    }
+`,
 }, {
     Name:  "copyid",
-    Short: "Copy files by ID.",
-    Long: `This command copies files by ID.
+    Short: "Copy files by ID",
+    Long: `This command copies files by ID

-Usage examples:
+Usage:

-` + "```console" + `
-rclone backend copyid drive: ID path
-rclone backend copyid drive: ID1 path1 ID2 path2
-` + "```" + `
+    rclone backend copyid drive: ID path
+    rclone backend copyid drive: ID1 path1 ID2 path2

 It copies the drive file with ID given to the path (an rclone path which
 will be passed internally to rclone copyto). The ID and path pairs can be

@@ -3833,19 +3804,17 @@ component will be used as the file name.
 If the destination is a drive backend then server-side copying will be
 attempted if possible.

-Use the --interactive/-i or --dry-run flag to see what would be copied before
-copying.`,
+Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
+`,
 }, {
     Name:  "moveid",
-    Short: "Move files by ID.",
-    Long: `This command moves files by ID.
+    Short: "Move files by ID",
+    Long: `This command moves files by ID

-Usage examples:
+Usage:

-` + "```console" + `
-rclone backend moveid drive: ID path
-rclone backend moveid drive: ID1 path1 ID2 path2
-` + "```" + `
+    rclone backend moveid drive: ID path
+    rclone backend moveid drive: ID1 path1 ID2 path2

 It moves the drive file with ID given to the path (an rclone path which
 will be passed internally to rclone moveto).

@@ -3857,65 +3826,58 @@ component will be used as the file name.
 If the destination is a drive backend then server-side moving will be
 attempted if possible.

-Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.`,
+Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
+`,
 }, {
     Name:  "exportformats",
-    Short: "Dump the export formats for debug purposes.",
+    Short: "Dump the export formats for debug purposes",
 }, {
     Name:  "importformats",
-    Short: "Dump the import formats for debug purposes.",
+    Short: "Dump the import formats for debug purposes",
 }, {
     Name:  "query",
-    Short: "List files using Google Drive query language.",
-    Long: `This command lists files based on a query.
+    Short: "List files using Google Drive query language",
+    Long: `This command lists files based on a query

-Usage example:
+Usage:

-` + "```console" + `
-rclone backend query drive: query
-` + "```" + `
+    rclone backend query drive: query

 The query syntax is documented at [Google Drive Search query terms and
 operators](https://developers.google.com/drive/api/guides/ref-search-terms).

 For example:

-` + "```console" + `
-rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
-` + "```" + `
+    rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"

 If the query contains literal ' or \ characters, these need to be escaped with
 \ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
 file named "foo ' \.txt":

-` + "```console" + `
-rclone backend query drive: "name = 'foo \' \\\.txt'"
-` + "```" + `
+    rclone backend query drive: "name = 'foo \' \\\.txt'"

 The result is a JSON array of matches, for example:

-` + "```json" + `
-[
-{
-    "createdTime": "2017-06-29T19:58:28.537Z",
-    "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
-    "md5Checksum": "68518d16be0c6fbfab918be61d658032",
-    "mimeType": "text/plain",
-    "modifiedTime": "2024-02-02T10:40:02.874Z",
-    "name": "foo ' \\.txt",
-    "parents": [
-        "0BxAe_BCDE4zkFGZpcWJGek0xbzC"
-    ],
-    "resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
-    "sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
-    "size": "311",
-    "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
-}
-]
-` + "```console",
+    [
+    {
+    "createdTime": "2017-06-29T19:58:28.537Z",
+    "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
+    "md5Checksum": "68518d16be0c6fbfab918be61d658032",
+    "mimeType": "text/plain",
+    "modifiedTime": "2024-02-02T10:40:02.874Z",
+    "name": "foo ' \\.txt",
+    "parents": [
+        "0BxAe_BCDE4zkFGZpcWJGek0xbzC"
+    ],
+    "resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
+    "sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
+    "size": "311",
+    "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
+    }
+    ]`,
 }, {
     Name:  "rescue",
-    Short: "Rescue or delete any orphaned files.",
+    Short: "Rescue or delete any orphaned files",
     Long: `This command rescues or deletes any orphaned files or directories.

 Sometimes files can get orphaned in Google Drive. This means that they

@@ -3924,31 +3886,26 @@ are no longer in any folder in Google Drive.
 This command finds those files and either rescues them to a directory
 you specify or deletes them.

+Usage:
+
 This can be used in 3 ways.

-First, list all orphaned files:
+First, list all orphaned files

-` + "```console" + `
-rclone backend rescue drive:
-` + "```" + `
+    rclone backend rescue drive:

-Second rescue all orphaned files to the directory indicated:
+Second rescue all orphaned files to the directory indicated

-` + "```console" + `
-rclone backend rescue drive: "relative/path/to/rescue/directory"
-` + "```" + `
+    rclone backend rescue drive: "relative/path/to/rescue/directory"

-E.g. to rescue all orphans to a directory called "Orphans" in the top level:
+e.g. To rescue all orphans to a directory called "Orphans" in the top level

-` + "```console" + `
-rclone backend rescue drive: Orphans
-` + "```" + `
+    rclone backend rescue drive: Orphans

-Third delete all orphaned files to the trash:
+Third delete all orphaned files to the trash

-` + "```console" + `
-rclone backend rescue drive: -o delete
-` + "```",
+    rclone backend rescue drive: -o delete
+`,
 }}

 // Command the backend to run a named command

@@ -149,7 +149,6 @@ func (f *Fs) setPermissions(ctx context.Context, info *drive.File, permissions [
     _, err := f.svc.Permissions.Create(info.Id, perm).
         SupportsAllDrives(true).
        SendNotificationEmail(false).
-        EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
        Context(ctx).Do()
     return f.shouldRetry(ctx, err)
 })

@@ -484,7 +483,6 @@ func (f *Fs) setOwner(ctx context.Context, info *drive.File, owner string) (err
         SupportsAllDrives(true).
         TransferOwnership(true).
         // SendNotificationEmail(false). - required apparently!
-        EnforceExpansiveAccess(f.opt.EnforceExpansiveAccess).
         Context(ctx).Do()
     return f.shouldRetry(ctx, err)
 })

@@ -1330,16 +1330,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
     var result *files.RelocationResult
     err = f.pacer.Call(func() (bool, error) {
         result, err = f.srv.MoveV2(&arg)
-        switch e := err.(type) {
-        case files.MoveV2APIError:
-            // There seems to be a bit of eventual consistency here which causes this to
-            // fail on just created objects
-            // See: https://github.com/rclone/rclone/issues/8881
-            if e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
-                fs.Debugf(srcObj, "Retrying move on %v error", err)
-                return true, err
-            }
-        }
         return shouldRetry(ctx, err)
     })
     if err != nil {

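The removed switch above retried MoveV2 when the API reported a lookup "not found" on a just-created object, i.e. a transient eventual-consistency failure. A generic sketch of that retry-on-transient-error shape, with placeholder names rather than the Dropbox SDK types:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// errNotFound stands in for a transient "object not visible yet" error.
var errNotFound = errors.New("from_lookup/not_found")

// callWithRetry retries fn up to attempts times, sleeping between tries,
// but only while shouldRetry classifies the error as transient.
func callWithRetry(attempts int, backoff time.Duration, fn func() error, shouldRetry func(error) bool) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil || !shouldRetry(err) {
			return err
		}
		time.Sleep(backoff)
	}
	return err
}

func main() {
	calls := 0
	err := callWithRetry(5, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errNotFound // object not visible yet
		}
		return nil
	}, func(err error) bool {
		return errors.Is(err, errNotFound)
	})
	fmt.Println(calls, err) // succeeds on the third attempt
}
```
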
@@ -1292,7 +1292,7 @@ func (f *ftpReadCloser) Close() error {
 // See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
 if errX := textprotoError(err); errX != nil {
     switch errX.Code {
-    case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend, ftp.StatusRequestedFileActionOK:
+    case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
         err = nil
     }
 }

@@ -43,42 +43,33 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str

 var commandHelp = []fs.CommandHelp{{
     Name:  "drop",
-    Short: "Drop cache.",
+    Short: "Drop cache",
     Long: `Completely drop checksum cache.
-
-Usage example:
-
-` + "```console" + `
-rclone backend drop hasher:
-` + "```",
+Usage Example:
+    rclone backend drop hasher:
+`,
 }, {
     Name:  "dump",
-    Short: "Dump the database.",
-    Long:  "Dump cache records covered by the current remote.",
+    Short: "Dump the database",
+    Long:  "Dump cache records covered by the current remote",
 }, {
     Name:  "fulldump",
-    Short: "Full dump of the database.",
-    Long:  "Dump all cache records in the database.",
+    Short: "Full dump of the database",
+    Long:  "Dump all cache records in the database",
 }, {
     Name:  "import",
-    Short: "Import a SUM file.",
+    Short: "Import a SUM file",
     Long: `Amend hash cache from a SUM file and bind checksums to files by size/time.
-
-Usage example:
-
-` + "```console" + `
-rclone backend import hasher:subdir md5 /path/to/sum.md5
-` + "```",
+Usage Example:
+    rclone backend import hasher:subdir md5 /path/to/sum.md5
+`,
 }, {
     Name:  "stickyimport",
-    Short: "Perform fast import of a SUM file.",
+    Short: "Perform fast import of a SUM file",
     Long: `Fill hash cache from a SUM file without verifying file fingerprints.
-
-Usage example:
-
-` + "```console" + `
-rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
-` + "```",
+Usage Example:
+    rclone backend stickyimport hasher:subdir md5 remote:path/to/sum.md5
+`,
 }}

 func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {

@@ -11,7 +11,6 @@ import (
     "io"
     "mime"
     "net/http"
-    "net/textproto"
     "net/url"
     "path"
     "strings"

@@ -38,10 +37,6 @@ func init() {
     Description: "HTTP",
     NewFs:       NewFs,
     CommandHelp: commandHelp,
-    MetadataInfo: &fs.MetadataInfo{
-        System: systemMetadataInfo,
-        Help:   `HTTP metadata keys are case insensitive and are always returned in lower case.`,
-    },
     Options: []fs.Option{{
         Name: "url",
         Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",

@@ -103,40 +98,6 @@ sizes of any files, and some files that don't exist may be in the listing.`,
     fs.Register(fsi)
 }

-// system metadata keys which this backend owns
-var systemMetadataInfo = map[string]fs.MetadataHelp{
-    "cache-control": {
-        Help:    "Cache-Control header",
-        Type:    "string",
-        Example: "no-cache",
-    },
-    "content-disposition": {
-        Help:    "Content-Disposition header",
-        Type:    "string",
-        Example: "inline",
-    },
-    "content-disposition-filename": {
-        Help:    "Filename retrieved from Content-Disposition header",
-        Type:    "string",
-        Example: "file.txt",
-    },
-    "content-encoding": {
-        Help:    "Content-Encoding header",
-        Type:    "string",
-        Example: "gzip",
-    },
-    "content-language": {
-        Help:    "Content-Language header",
-        Type:    "string",
-        Example: "en-US",
-    },
-    "content-type": {
-        Help:    "Content-Type header",
-        Type:    "string",
-        Example: "text/plain",
-    },
-}
-
 // Options defines the configuration for this backend
 type Options struct {
     Endpoint string `config:"url"`

@@ -165,13 +126,6 @@ type Object struct {
     size        int64
     modTime     time.Time
     contentType string
-
-    // Metadata as pointers to strings as they often won't be present
-    contentDisposition         *string // Content-Disposition: header
-    contentDispositionFilename *string // Filename retrieved from Content-Disposition: header
-    cacheControl               *string // Cache-Control: header
-    contentEncoding            *string // Content-Encoding: header
-    contentLanguage            *string // Content-Language: header
 }

 // statusError returns an error if the res contained an error

@@ -323,7 +277,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     ci: ci,
 }
 f.features = (&fs.Features{
-    ReadMetadata:            true,
     CanHaveEmptyDirectories: true,
 }).Fill(ctx, f)

@@ -476,29 +429,6 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
     return names, nil
 }

-// parseFilename extracts the filename from a Content-Disposition header
-func parseFilename(contentDisposition string) (string, error) {
-    // Normalize the contentDisposition to canonical MIME format
-    mediaType, params, err := mime.ParseMediaType(contentDisposition)
-    if err != nil {
-        return "", fmt.Errorf("failed to parse contentDisposition: %v", err)
-    }
-
-    // Check if the contentDisposition is an attachment
-    if strings.ToLower(mediaType) != "attachment" {
-        return "", fmt.Errorf("not an attachment: %s", mediaType)
-    }
-
-    // Extract the filename from the parameters
-    filename, ok := params["filename"]
-    if !ok {
-        return "", fmt.Errorf("filename not found in contentDisposition")
-    }
-
-    // Decode filename if it contains special encoding
-    return textproto.TrimString(filename), nil
-}
-
 // Adds the configured headers to the request if any
 func addHeaders(req *http.Request, opt *Options) {
     for i := 0; i < len(opt.Headers); i += 2 {

@@ -647,9 +577,6 @@ func (o *Object) String() string {

 // Remote the name of the remote HTTP file, relative to the fs root
 func (o *Object) Remote() string {
-    if o.contentDispositionFilename != nil {
-        return *o.contentDispositionFilename
-    }
     return o.remote
 }

@@ -707,29 +634,6 @@ func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
     o.modTime = t
     o.contentType = res.Header.Get("Content-Type")
     o.size = rest.ParseSizeFromHeaders(res.Header)
-    contentDisposition := res.Header.Get("Content-Disposition")
-    if contentDisposition != "" {
-        o.contentDisposition = &contentDisposition
-    }
-    if o.contentDisposition != nil {
-        var filename string
-        filename, err = parseFilename(*o.contentDisposition)
-        if err == nil && filename != "" {
-            o.contentDispositionFilename = &filename
-        }
-    }
-    cacheControl := res.Header.Get("Cache-Control")
-    if cacheControl != "" {
-        o.cacheControl = &cacheControl
-    }
-    contentEncoding := res.Header.Get("Content-Encoding")
-    if contentEncoding != "" {
-        o.contentEncoding = &contentEncoding
-    }
-    contentLanguage := res.Header.Get("Content-Language")
-    if contentLanguage != "" {
-        o.contentLanguage = &contentLanguage
-    }

     // If NoSlash is set then check ContentType to see if it is a directory
     if o.fs.opt.NoSlash {

@@ -818,13 +722,11 @@ var commandHelp = []fs.CommandHelp{{
     Long: `This set command can be used to update the config parameters
 for a running http backend.

-Usage examples:
+Usage Examples:

-` + "```console" + `
-rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-rclone rc backend/command command=set fs=remote: -o url=https://example.com
-` + "```" + `
+    rclone backend set remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+    rclone rc backend/command command=set fs=remote: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+    rclone rc backend/command command=set fs=remote: -o url=https://example.com

 The option keys are named as they are in the config file.

@@ -832,7 +734,8 @@ This rebuilds the connection to the http backend when it is called with
 the new parameters. Only new parameters need be passed as the values
 will default to those currently in use.

-It doesn't return anything.`,
+It doesn't return anything.
+`,
 }}

 // Command the backend to run a named command

@@ -868,30 +771,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
     }
 }

-// Metadata returns metadata for an object
-//
-// It should return nil if there is no Metadata
-func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
-    metadata = make(fs.Metadata, 6)
-    if o.contentType != "" {
-        metadata["content-type"] = o.contentType
-    }
-
-    // Set system metadata
-    setMetadata := func(k string, v *string) {
-        if v == nil || *v == "" {
-            return
-        }
-        metadata[k] = *v
-    }
-    setMetadata("content-disposition", o.contentDisposition)
-    setMetadata("content-disposition-filename", o.contentDispositionFilename)
-    setMetadata("cache-control", o.cacheControl)
-    setMetadata("content-language", o.contentLanguage)
-    setMetadata("content-encoding", o.contentEncoding)
-    return metadata, nil
-}
-
 // Check the interfaces are satisfied
 var (
     _ fs.Fs = &Fs{}

@@ -899,5 +778,4 @@ var (
     _ fs.Object     = &Object{}
     _ fs.MimeTyper  = &Object{}
     _ fs.Commander  = &Fs{}
-    _ fs.Metadataer = &Object{}
 )

@@ -60,17 +60,6 @@ func prepareServer(t *testing.T) configmap.Simple {
     what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
     assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
     assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
-
-    // Set the content disposition header for the fifth file
-    // later we will check if it is set using the metadata method
-    if r.URL.Path == "/five.txt.gz" {
-        w.Header().Set("Content-Disposition", "attachment; filename=\"five.txt.gz\"")
-        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-        w.Header().Set("Cache-Control", "no-cache")
-        w.Header().Set("Content-Language", "en-US")
-        w.Header().Set("Content-Encoding", "gzip")
-    }
-
     fileServer.ServeHTTP(w, r)
 })

@@ -113,33 +102,27 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
     sort.Sort(entries)

-    require.Equal(t, 5, len(entries))
+    require.Equal(t, 4, len(entries))

     e := entries[0]
-    assert.Equal(t, "five.txt.gz", e.Remote())
+    assert.Equal(t, "four", e.Remote())
     assert.Equal(t, int64(-1), e.Size())
-    _, ok := e.(fs.Object)
+    _, ok := e.(fs.Directory)
     assert.True(t, ok)

     e = entries[1]
-    assert.Equal(t, "four", e.Remote())
-    assert.Equal(t, int64(-1), e.Size())
-    _, ok = e.(fs.Directory)
-    assert.True(t, ok)
-
-    e = entries[2]
     assert.Equal(t, "one%.txt", e.Remote())
     assert.Equal(t, int64(5+lineEndSize), e.Size())
     _, ok = e.(*Object)
     assert.True(t, ok)

-    e = entries[3]
+    e = entries[2]
     assert.Equal(t, "three", e.Remote())
     assert.Equal(t, int64(-1), e.Size())
     _, ok = e.(fs.Directory)
     assert.True(t, ok)

-    e = entries[4]
+    e = entries[3]
     assert.Equal(t, "two.html", e.Remote())
     if noSlash {
         assert.Equal(t, int64(-1), e.Size())

@@ -235,23 +218,6 @@ func TestNewObjectWithLeadingSlash(t *testing.T) {
     assert.Equal(t, fs.ErrorObjectNotFound, err)
 }

-func TestNewObjectWithMetadata(t *testing.T) {
-    f := prepare(t)
-    o, err := f.NewObject(context.Background(), "/five.txt.gz")
-    require.NoError(t, err)
-    assert.Equal(t, "five.txt.gz", o.Remote())
-    ho, ok := o.(*Object)
-    assert.True(t, ok)
-    metadata, err := ho.Metadata(context.Background())
-    require.NoError(t, err)
-    assert.Equal(t, "text/plain; charset=utf-8", metadata["content-type"])
-    assert.Equal(t, "attachment; filename=\"five.txt.gz\"", metadata["content-disposition"])
-    assert.Equal(t, "five.txt.gz", metadata["content-disposition-filename"])
-    assert.Equal(t, "no-cache", metadata["cache-control"])
-    assert.Equal(t, "en-US", metadata["content-language"])
-    assert.Equal(t, "gzip", metadata["content-encoding"])
-}
-
 func TestOpen(t *testing.T) {
     m := prepareServer(t)

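The removed parseFilename helper above leans on the standard library's mime.ParseMediaType to pull the filename out of a Content-Disposition header. A standalone sketch of the same idea, assuming only the Go standard library (the function name here is illustrative):

```go
package main

import (
	"fmt"
	"mime"
	"strings"
)

// filenameFromContentDisposition returns the filename parameter of an
// attachment-style Content-Disposition header value.
func filenameFromContentDisposition(v string) (string, error) {
	// ParseMediaType splits "attachment; filename=..." into the media
	// type and its parameters, handling quoting and RFC 2231 encoding.
	mediaType, params, err := mime.ParseMediaType(v)
	if err != nil {
		return "", fmt.Errorf("parse Content-Disposition: %w", err)
	}
	if !strings.EqualFold(mediaType, "attachment") {
		return "", fmt.Errorf("not an attachment: %s", mediaType)
	}
	name, ok := params["filename"]
	if !ok {
		return "", fmt.Errorf("no filename parameter")
	}
	return name, nil
}

func main() {
	name, err := filenameFromContentDisposition(`attachment; filename="five.txt.gz"`)
	fmt.Println(name, err) // five.txt.gz <nil>
}
```
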
Binary file not shown.
@@ -1070,11 +1070,12 @@ func (f *Fs) Hashes() hash.Set {
 var commandHelp = []fs.CommandHelp{
     {
         Name:  "noop",
-        Short: "A null operation for testing backend commands.",
-        Long:  `This is a test command which has some options you can try to change the output.`,
+        Short: "A null operation for testing backend commands",
+        Long: `This is a test command which has some options
+you can try to change the output.`,
         Opts: map[string]string{
-            "echo":  "Echo the input arguments.",
-            "error": "Return an error based on option value.",
+            "echo":  "echo the input arguments",
+            "error": "return an error based on option value",
         },
     },
 }

@@ -18,7 +18,6 @@ Improvements:
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -48,9 +47,6 @@ const (
|
|||||||
maxSleep = 2 * time.Second
|
maxSleep = 2 * time.Second
|
||||||
eventWaitTime = 500 * time.Millisecond
|
eventWaitTime = 500 * time.Millisecond
|
||||||
decayConstant = 2 // bigger for slower decay, exponential
|
decayConstant = 2 // bigger for slower decay, exponential
|
||||||
|
|
||||||
sessionIDConfigKey = "session_id"
|
|
||||||
masterKeyConfigKey = "master_key"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -74,24 +70,6 @@ func init() {
|
|||||||
Help: "Password.",
|
Help: "Password.",
|
||||||
Required: true,
|
Required: true,
|
||||||
IsPassword: true,
|
IsPassword: true,
|
||||||
}, {
|
|
||||||
Name: "2fa",
|
|
||||||
Help: `The 2FA code of your MEGA account if the account is set up with one`,
|
|
||||||
Required: false,
|
|
-        }, {
-            Name:      sessionIDConfigKey,
-            Help:      "Session (internal use only)",
-            Required:  false,
-            Advanced:  true,
-            Sensitive: true,
-            Hide:      fs.OptionHideBoth,
-        }, {
-            Name:      masterKeyConfigKey,
-            Help:      "Master key (internal use only)",
-            Required:  false,
-            Advanced:  true,
-            Sensitive: true,
-            Hide:      fs.OptionHideBoth,
         }, {
             Name: "debug",
             Help: `Output more debug from Mega.

@@ -135,9 +113,6 @@ Enabling it will increase CPU usage and add network overhead.`,
 type Options struct {
     User       string `config:"user"`
     Pass       string `config:"pass"`
-    TwoFA      string `config:"2fa"`
-    SessionID  string `config:"session_id"`
-    MasterKey  string `config:"master_key"`
     Debug      bool   `config:"debug"`
     HardDelete bool   `config:"hard_delete"`
     UseHTTPS   bool   `config:"use_https"`

@@ -234,19 +209,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     }
     ci := fs.GetConfig(ctx)

-    // Create Fs
-    root = parsePath(root)
-    f := &Fs{
-        name:  name,
-        root:  root,
-        opt:   *opt,
-        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-    }
-    f.features = (&fs.Features{
-        DuplicateFiles:          true,
-        CanHaveEmptyDirectories: true,
-    }).Fill(ctx, f)

     // cache *mega.Mega on username so we can reuse and share
     // them between remotes. They are expensive to make as they
     // contain all the objects and sharing the objects makes the

@@ -286,29 +248,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         })
     }

-        if opt.SessionID == "" {
-            fs.Debugf(f, "Using username and password to initialize the Mega API")
-            err := srv.MultiFactorLogin(opt.User, opt.Pass, opt.TwoFA)
-            if err != nil {
-                return nil, fmt.Errorf("couldn't login: %w", err)
-            }
-            megaCache[opt.User] = srv
-            m.Set(sessionIDConfigKey, srv.GetSessionID())
-            encodedMasterKey := base64.StdEncoding.EncodeToString(srv.GetMasterKey())
-            m.Set(masterKeyConfigKey, encodedMasterKey)
-        } else {
-            fs.Debugf(f, "Using previously stored session ID and master key to initialize the Mega API")
-            decodedMasterKey, err := base64.StdEncoding.DecodeString(opt.MasterKey)
-            if err != nil {
-                return nil, fmt.Errorf("couldn't decode master key: %w", err)
-            }
-            err = srv.LoginWithKeys(opt.SessionID, decodedMasterKey)
-            if err != nil {
-                fs.Debugf(f, "login with previous auth keys failed: %v", err)
-            }
+        err := srv.Login(opt.User, opt.Pass)
+        if err != nil {
+            return nil, fmt.Errorf("couldn't login: %w", err)
         }
+        megaCache[opt.User] = srv
     }
-    f.srv = srv
+    root = parsePath(root)
+    f := &Fs{
+        name:  name,
+        root:  root,
+        opt:   *opt,
+        srv:   srv,
+        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+    }
+    f.features = (&fs.Features{
+        DuplicateFiles:          true,
+        CanHaveEmptyDirectories: true,
+    }).Fill(ctx, f)

     // Find the root node and check if it is a file or not
     _, err = f.findRoot(ctx, false)

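The hunk above drops Mega session caching: the removed side logs in once, then stores the session ID and the base64-encoded master key back into the config so later runs can resume without a fresh login. A minimal sketch of that flow, assuming a go-mega style client exposing the MultiFactorLogin/GetSessionID/GetMasterKey/LoginWithKeys methods the removed lines call (the import path, function name and `save` callback are illustrative, not rclone's actual wiring):

```go
package megasketch

import (
	"encoding/base64"
	"fmt"

	mega "github.com/t3rm1n4l/go-mega"
)

// loginOrResume performs a full login on first use, then resumes the
// saved session on later runs (mirrors the removed "-" side above).
func loginOrResume(srv *mega.Mega, user, pass, twoFA, sessionID, masterKeyB64 string, save func(key, value string)) error {
	if sessionID == "" {
		// First run: authenticate, then persist the session for reuse.
		if err := srv.MultiFactorLogin(user, pass, twoFA); err != nil {
			return fmt.Errorf("couldn't login: %w", err)
		}
		save("session_id", srv.GetSessionID())
		// The master key is raw bytes, so store it base64-encoded.
		save("master_key", base64.StdEncoding.EncodeToString(srv.GetMasterKey()))
		return nil
	}
	// Later runs: decode the stored key and resume without a fresh login.
	key, err := base64.StdEncoding.DecodeString(masterKeyB64)
	if err != nil {
		return fmt.Errorf("couldn't decode master key: %w", err)
	}
	return srv.LoginWithKeys(sessionID, key)
}
```

The point of caching is visible in the context lines: a `*mega.Mega` is expensive to build, so one per username is shared between remotes.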
@@ -87,7 +87,7 @@ Please choose the 'y' option to set your own password then enter your secret.`,

 var commandHelp = []fs.CommandHelp{{
     Name:  "du",
-    Short: "Return disk usage information for a specified directory.",
+    Short: "Return disk usage information for a specified directory",
     Long: `The usage information returned, includes the targeted directory as well as all
 files stored in any sub-directories that may exist.`,
 }, {

@@ -96,12 +96,7 @@ files stored in any sub-directories that may exist.`,
     Long: `The desired path location (including applicable sub-directories) ending in
 the object that will be the target of the symlink (for example, /links/mylink).
 Include the file extension for the object, if applicable.
-Usage example:
-
-` + "```console" + `
-rclone backend symlink <src> <path>
-` + "```",
+` + "`rclone backend symlink <src> <path>`",
 },
 }

@@ -30,25 +30,20 @@ const (

 var commandHelp = []fs.CommandHelp{{
     Name:  operationRename,
-    Short: "change the name of an object.",
+    Short: "change the name of an object",
     Long: `This command can be used to rename a object.

-Usage example:
+Usage Examples:

-` + "```console" + `
-rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
-` + "```",
+rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+`,
     Opts: nil,
 }, {
     Name:  operationListMultiPart,
-    Short: "List the unfinished multipart uploads.",
+    Short: "List the unfinished multipart uploads",
     Long: `This command lists the unfinished multipart uploads in JSON format.

-Usage example:
+rclone backend list-multipart-uploads oos:bucket/path/to/object

-` + "```console" + `
-rclone backend list-multipart-uploads oos:bucket/path/to/object
-` + "```" + `

 It returns a dictionary of buckets with values as lists of unfinished
 multipart uploads.

@@ -56,82 +51,70 @@ multipart uploads.

 You can call it with no bucket in which case it lists all bucket, with
 a bucket or with a bucket and path.

-` + "```json" + `
-{
-  "test-bucket": [
-    {
-      "namespace": "test-namespace",
-      "bucket": "test-bucket",
-      "object": "600m.bin",
-      "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
-      "timeCreated": "2022-07-29T06:21:16.595Z",
-      "storageTier": "Standard"
-    }
-  ]
-}`,
+{
+  "test-bucket": [
+    {
+      "namespace": "test-namespace",
+      "bucket": "test-bucket",
+      "object": "600m.bin",
+      "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
+      "timeCreated": "2022-07-29T06:21:16.595Z",
+      "storageTier": "Standard"
+    }
+  ]
+`,
 }, {
     Name:  operationCleanup,
     Short: "Remove unfinished multipart uploads.",
     Long: `This command removes unfinished multipart uploads of age greater than
 max-age which defaults to 24 hours.

-Note that you can use --interactive/-i or --dry-run with this command to see
-what it would do.
+Note that you can use --interactive/-i or --dry-run with this command to see what
+it would do.

-Usage examples:
+rclone backend cleanup oos:bucket/path/to/object
+rclone backend cleanup -o max-age=7w oos:bucket/path/to/object

-` + "```console" + `
-rclone backend cleanup oos:bucket/path/to/object
-rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
-` + "```" + `
-
-Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
+Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
+`,
     Opts: map[string]string{
-        "max-age": "Max age of upload to delete.",
+        "max-age": "Max age of upload to delete",
     },
 }, {
     Name:  operationRestore,
-    Short: "Restore objects from Archive to Standard storage.",
-    Long: `This command can be used to restore one or more objects from Archive to
-Standard storage.
+    Short: "Restore objects from Archive to Standard storage",
+    Long: `This command can be used to restore one or more objects from Archive to Standard storage.

-Usage examples:
+Usage Examples:

-` + "```console" + `
-rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
-rclone backend restore oos:bucket -o hours=HOURS
-` + "```" + `
+rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
+rclone backend restore oos:bucket -o hours=HOURS

 This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags

-` + "```console" + `
-rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
-` + "```" + `
+rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72

-All the objects shown will be marked for restore, then:
+All the objects shown will be marked for restore, then

-` + "```console" + `
-rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
-` + "```" + `
+rclone backend restore --include "*.txt" oos:bucket/path -o hours=72

-It returns a list of status dictionaries with Object Name and Status keys.
-The Status will be "RESTORED"" if it was successful or an error message if not.
+It returns a list of status dictionaries with Object Name and Status
+keys. The Status will be "RESTORED"" if it was successful or an error message
+if not.

-` + "```json" + `
-[
-  {
-    "Object": "test.txt"
-    "Status": "RESTORED",
-  },
-  {
-    "Object": "test/file4.txt"
-    "Status": "RESTORED",
-  }
-]
-` + "```",
+[
+  {
+    "Object": "test.txt"
+    "Status": "RESTORED",
+  },
+  {
+    "Object": "test/file4.txt"
+    "Status": "RESTORED",
+  }
+]
+`,
     Opts: map[string]string{
-        "hours": `The number of hours for which this object will be restored.
-Default is 24 hrs.`,
+        "hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
     },
 },
 }

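The cleanup help above leans on rclone's suffix-aware duration parsing ("2h, 7d, 7w etc."), which the standard library's time.ParseDuration does not accept. A quick illustration, assuming fs.ParseDuration keeps its current shape in the rclone fs package:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	// "7w" is seven weeks; plain time.ParseDuration would reject
	// the d/w suffixes that rclone's parser accepts here.
	d, err := fs.ParseDuration("7w")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1176h0m0s
}
```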
@@ -75,7 +75,7 @@ func TestLinkValid(t *testing.T) {
                 Expire: Time(time.Now().Add(time.Hour)),
             },
             expected: true,
-            desc:     "should fallback to Expire field when URL expire parameter is unparsable",
+            desc:     "should fallback to Expire field when URL expire parameter is unparseable",
         },
         {
             name: "invalid when both URL expire and Expire field are expired",

@@ -1678,43 +1678,39 @@ func (f *Fs) decompressDir(ctx context.Context, filename, id, password string, s

 var commandHelp = []fs.CommandHelp{{
     Name:  "addurl",
-    Short: "Add offline download task for url.",
+    Short: "Add offline download task for url",
     Long: `This command adds offline download task for url.

-Usage example:
+Usage:

-` + "```console" + `
-rclone backend addurl pikpak:dirpath url
-` + "```" + `
+rclone backend addurl pikpak:dirpath url

 Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
-download will fallback to default 'My Pack' folder.`,
+download will fallback to default 'My Pack' folder.
+`,
 }, {
     Name:  "decompress",
-    Short: "Request decompress of a file/files in a folder.",
+    Short: "Request decompress of a file/files in a folder",
     Long: `This command requests decompress of file/files in a folder.

-Usage examples:
+Usage:

-` + "```console" + `
-rclone backend decompress pikpak:dirpath {filename} -o password=password
-rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
-` + "```" + `
+rclone backend decompress pikpak:dirpath {filename} -o password=password
+rclone backend decompress pikpak:dirpath {filename} -o delete-src-file

 An optional argument 'filename' can be specified for a file located in
 'pikpak:dirpath'. You may want to pass '-o password=password' for a
 password-protected files. Also, pass '-o delete-src-file' to delete
 source files after decompression finished.

 Result:

-` + "```json" + `
-{
-  "Decompressed": 17,
-  "SourceDeleted": 0,
-  "Errors": 0
-}
-` + "```",
+{
+  "Decompressed": 17,
+  "SourceDeleted": 0,
+  "Errors": 0
+}
+`,
 }}

 // Command the backend to run a named command

@@ -1,4 +1,4 @@
-# Adding a new s3 provider
+## Adding a new s3 provider

 It is quite easy to add a new S3 provider to rclone.

@@ -12,202 +12,179 @@ All tags can be found in `backend/s3/providers.go` Provider Struct.

 Looking through a few of the yaml files as examples should make things
 clear. `AWS.yaml` as the most config. pasting.

-## YAML
+### YAML

 In `backend/s3/provider/YourProvider.yaml`

 - name
 - description
   - More like the full name often "YourProvider + Object Storage"
 - [Region]
   - Any regions your provider supports or the defaults (use `region: {}` for this)
   - Example from AWS.yaml:

-```yaml
-region:
-  us-east-1: |-
-    The default endpoint - a good choice if you are unsure.
-    US Region, Northern Virginia, or Pacific Northwest.
-    Leave location constraint empty.
-```
-
-  - The defaults (as seen in Rclone.yaml):
-
-```yaml
-region:
-  "": |-
-    Use this if unsure.
-    Will use v4 signatures and an empty region.
-  other-v2-signature: |-
-    Use this only if v4 signatures don't work.
-    E.g. pre Jewel/v10 CEPH.
-```
-
-- [Endpoint]
-  - Any endpoints your provider supports
-
-  - Example from Mega.yaml
-
-```yaml
-endpoint:
-  s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
-```
-
-- [Location Constraint]
-  - The Location Constraint of your remote, often same as region.
-  - Example from AWS.yaml
-
-```yaml
-location_constraint:
-  "": Empty for US Region, Northern Virginia, or Pacific Northwest
-  us-east-2: US East (Ohio) Region
-```
-
-- [ACL]
-  - Identical across *most* providers. Select the default with `acl: {}`
-  - Example from AWS.yaml
-
-```yaml
-acl:
-  private: |-
-    Owner gets FULL_CONTROL.
-    No one else has access rights (default).
-  public-read: |-
-    Owner gets FULL_CONTROL.
-    The AllUsers group gets READ access.
-  public-read-write: |-
-    Owner gets FULL_CONTROL.
-    The AllUsers group gets READ and WRITE access.
-    Granting this on a bucket is generally not recommended.
-  authenticated-read: |-
-    Owner gets FULL_CONTROL.
-    The AuthenticatedUsers group gets READ access.
-  bucket-owner-read: |-
-    Object owner gets FULL_CONTROL.
-    Bucket owner gets READ access.
-    If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
-  bucket-owner-full-control: |-
-    Both the object owner and the bucket owner get FULL_CONTROL over the object.
-    If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
-```
-
-- [Storage Class]
-  - Identical across *most* providers.
-  - Defaults from AWS.yaml
-
-```yaml
-storage_class:
-  "": Default
-  STANDARD: Standard storage class
-  REDUCED_REDUNDANCY: Reduced redundancy storage class
-  STANDARD_IA: Standard Infrequent Access storage class
-  ONEZONE_IA: One Zone Infrequent Access storage class
-  GLACIER: Glacier Flexible Retrieval storage class
-  DEEP_ARCHIVE: Glacier Deep Archive storage class
-  INTELLIGENT_TIERING: Intelligent-Tiering storage class
-  GLACIER_IR: Glacier Instant Retrieval storage class
-```
-
-- [Server Side Encryption]
-  - Not common, identical across *most* providers.
-  - Defaults from AWS.yaml
-
-```yaml
-server_side_encryption:
-  "": None
-  AES256: AES256
-  aws:kms: aws:kms
-```
-
-- [Advanced Options]
-  - All advanced options are Boolean - if true the configurator asks about that
-    value, if not it doesn't:
-
-```go
-BucketACL             bool `yaml:"bucket_acl,omitempty"`
-DirectoryBucket       bool `yaml:"directory_bucket,omitempty"`
-LeavePartsOnError     bool `yaml:"leave_parts_on_error,omitempty"`
-RequesterPays         bool `yaml:"requester_pays,omitempty"`
-SSECustomerAlgorithm  bool `yaml:"sse_customer_algorithm,omitempty"`
-SSECustomerKey        bool `yaml:"sse_customer_key,omitempty"`
-SSECustomerKeyBase64  bool `yaml:"sse_customer_key_base64,omitempty"`
-SSECustomerKeyMd5     bool `yaml:"sse_customer_key_md5,omitempty"`
-SSEKmsKeyID           bool `yaml:"sse_kms_key_id,omitempty"`
-STSEndpoint           bool `yaml:"sts_endpoint,omitempty"`
-UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`
-```
-
-  - Example from AWS.yaml:
-
-```yaml
-bucket_acl: true
-directory_bucket: true
-leave_parts_on_error: true
-requester_pays: true
-sse_customer_algorithm: true
-sse_customer_key: true
-sse_customer_key_base64: true
-sse_customer_key_md5: true
-sse_kms_key_id: true
-sts_endpoint: true
-use_accelerate_endpoint: true
-```
-
-- Quirks
-  - Quirks are discovered through documentation and running the tests as seen below.
-  - Most quirks are *bool as to have 3 values, `true`, `false` and `dont care`.
-
-```go
-type Quirks struct {
-    ListVersion           *int   `yaml:"list_version,omitempty"` // 1 or 2
-    ForcePathStyle        *bool  `yaml:"force_path_style,omitempty"` // true = path-style
-    ListURLEncode         *bool  `yaml:"list_url_encode,omitempty"`
-    UseMultipartEtag      *bool  `yaml:"use_multipart_etag,omitempty"`
-    UseAlreadyExists      *bool  `yaml:"use_already_exists,omitempty"`
-    UseAcceptEncodingGzip *bool  `yaml:"use_accept_encoding_gzip,omitempty"`
-    MightGzip             *bool  `yaml:"might_gzip,omitempty"`
-    UseMultipartUploads   *bool  `yaml:"use_multipart_uploads,omitempty"`
-    UseUnsignedPayload    *bool  `yaml:"use_unsigned_payload,omitempty"`
-    UseXID                *bool  `yaml:"use_x_id,omitempty"`
-    SignAcceptEncoding    *bool  `yaml:"sign_accept_encoding,omitempty"`
-    CopyCutoff            *int64 `yaml:"copy_cutoff,omitempty"`
-    MaxUploadParts        *int   `yaml:"max_upload_parts,omitempty"`
-    MinChunkSize          *int64 `yaml:"min_chunk_size,omitempty"`
-}
-```
-
-  - Example from AWS.yaml
-
 ```yaml
-quirks:
-  might_gzip: false # Never auto gzips objects
-  use_unsigned_payload: false # AWS has trailer support
+region:
+  us-east-1: |-
+    The default endpoint - a good choice if you are unsure.
+    US Region, Northern Virginia, or Pacific Northwest.
+    Leave location constraint empty.
+```
+- The defaults (as seen in Rclone.yaml):
+```yaml
+region:
+  "": |-
+    Use this if unsure.
+    Will use v4 signatures and an empty region.
+  other-v2-signature: |-
+    Use this only if v4 signatures don't work.
+    E.g. pre Jewel/v10 CEPH.
+```
+- [Endpoint]
+  - Any endpoints your provider supports
+  - Example from Mega.yaml
+```yaml
+endpoint:
+  s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
+```
+- [Location Constraint]
+  - The Location Constraint of your remote, often same as region.
+  - Example from AWS.yaml
+```yaml
+location_constraint:
+  "": Empty for US Region, Northern Virginia, or Pacific Northwest
+  us-east-2: US East (Ohio) Region
+```
+- [ACL]
+  - Identical across *most* providers. Select the default with `acl: {}`
+  - Example from AWS.yaml
+```yaml
+acl:
+  private: |-
+    Owner gets FULL_CONTROL.
+    No one else has access rights (default).
+  public-read: |-
+    Owner gets FULL_CONTROL.
+    The AllUsers group gets READ access.
+  public-read-write: |-
+    Owner gets FULL_CONTROL.
+    The AllUsers group gets READ and WRITE access.
+    Granting this on a bucket is generally not recommended.
+  authenticated-read: |-
+    Owner gets FULL_CONTROL.
+    The AuthenticatedUsers group gets READ access.
+  bucket-owner-read: |-
+    Object owner gets FULL_CONTROL.
+    Bucket owner gets READ access.
+    If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+  bucket-owner-full-control: |-
+    Both the object owner and the bucket owner get FULL_CONTROL over the object.
+    If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
+```
+- [Storage Class]
+  - Identical across *most* providers.
+  - Defaults from AWS.yaml
+```yaml
+storage_class:
+  "": Default
+  STANDARD: Standard storage class
+  REDUCED_REDUNDANCY: Reduced redundancy storage class
+  STANDARD_IA: Standard Infrequent Access storage class
+  ONEZONE_IA: One Zone Infrequent Access storage class
+  GLACIER: Glacier Flexible Retrieval storage class
+  DEEP_ARCHIVE: Glacier Deep Archive storage class
+  INTELLIGENT_TIERING: Intelligent-Tiering storage class
+  GLACIER_IR: Glacier Instant Retrieval storage class
+```
+- [Server Side Encryption]
+  - Not common, identical across *most* providers.
+  - Defaults from AWS.yaml
+```yaml
+server_side_encryption:
+  "": None
+  AES256: AES256
+  aws:kms: aws:kms
+```
+- [Advanced Options]
+  - All advanced options are Boolean - if true the configurator asks about that value, if not it doesn't:
+```go
+BucketACL             bool `yaml:"bucket_acl,omitempty"`
+DirectoryBucket       bool `yaml:"directory_bucket,omitempty"`
+LeavePartsOnError     bool `yaml:"leave_parts_on_error,omitempty"`
+RequesterPays         bool `yaml:"requester_pays,omitempty"`
+SSECustomerAlgorithm  bool `yaml:"sse_customer_algorithm,omitempty"`
+SSECustomerKey        bool `yaml:"sse_customer_key,omitempty"`
+SSECustomerKeyBase64  bool `yaml:"sse_customer_key_base64,omitempty"`
+SSECustomerKeyMd5     bool `yaml:"sse_customer_key_md5,omitempty"`
+SSEKmsKeyID           bool `yaml:"sse_kms_key_id,omitempty"`
+STSEndpoint           bool `yaml:"sts_endpoint,omitempty"`
+UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`
+```
+- Example from AWS.yaml:
+```yaml
+bucket_acl: true
+directory_bucket: true
+leave_parts_on_error: true
+requester_pays: true
+sse_customer_algorithm: true
+sse_customer_key: true
+sse_customer_key_base64: true
+sse_customer_key_md5: true
+sse_kms_key_id: true
+sts_endpoint: true
+use_accelerate_endpoint: true
+```
+- Quirks
+  - Quirks are discovered through documentation and running the tests as seen below.
+  - Most quirks are *bool as to have 3 values, `true`, `false` and `dont care`.
+```go
+type Quirks struct {
+    ListVersion           *int   `yaml:"list_version,omitempty"` // 1 or 2
+    ForcePathStyle        *bool  `yaml:"force_path_style,omitempty"` // true = path-style
+    ListURLEncode         *bool  `yaml:"list_url_encode,omitempty"`
+    UseMultipartEtag      *bool  `yaml:"use_multipart_etag,omitempty"`
+    UseAlreadyExists      *bool  `yaml:"use_already_exists,omitempty"`
+    UseAcceptEncodingGzip *bool  `yaml:"use_accept_encoding_gzip,omitempty"`
+    MightGzip             *bool  `yaml:"might_gzip,omitempty"`
+    UseMultipartUploads   *bool  `yaml:"use_multipart_uploads,omitempty"`
+    UseUnsignedPayload    *bool  `yaml:"use_unsigned_payload,omitempty"`
+    UseXID                *bool  `yaml:"use_x_id,omitempty"`
+    SignAcceptEncoding    *bool  `yaml:"sign_accept_encoding,omitempty"`
+    CopyCutoff            *int64 `yaml:"copy_cutoff,omitempty"`
+    MaxUploadParts        *int   `yaml:"max_upload_parts,omitempty"`
+    MinChunkSize          *int64 `yaml:"min_chunk_size,omitempty"`
+}
+```
+- Example from AWS.yaml
+```yaml
+quirks:
+  might_gzip: false # Never auto gzips objects
+  use_unsigned_payload: false # AWS has trailer support
 ```

 Note that if you omit a section, eg `region` then the user won't be
 asked that question, and if you add an empty section e.g. `region: {}`
 then the defaults from the `Other.yaml` will be used.

-## DOCS
+### DOCS

 - `docs/content/s3.md`
   - Add the provider at the top of the page.
   - Add a section about the provider linked from there.
   - Make sure this is in alphabetical order in the `Providers` section.
   - Add a transcript of a trial `rclone config` session
     - Edit the transcript to remove things which might change in subsequent versions
   - **Do not** alter or add to the autogenerated parts of `s3.md`
     - Rule of thumb: don't edit anything not mentioned above.
   - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
     - This will make autogenerated changes!
 - `README.md` - this is the home page in github
   - Add the provider and a link to the section you wrote in `docs/contents/s3.md`
 - `docs/content/_index.md` - this is the home page of rclone.org
   - Add the provider and a link to the section you wrote in `docs/contents/s3.md`
 - Once you've written the docs, run `make serve` and check they look OK
   in the web browser and the links (internal and external) all work.

-## TESTS
+### TESTS

 Once you've written the code, test `rclone config` works to your
 satisfaction and looks correct, and check the integration tests work

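The README above leans on `*bool` quirk fields to get three states out of YAML: an omitted key stays nil ("don't care"), while explicit true/false survive the round trip. A tiny self-contained illustration of why that works, using a cut-down struct rather than the real Provider type from `backend/s3/providers.go`:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Cut-down version of the Quirks struct shown above.
type quirks struct {
	MightGzip          *bool `yaml:"might_gzip,omitempty"`
	UseUnsignedPayload *bool `yaml:"use_unsigned_payload,omitempty"`
	ForcePathStyle     *bool `yaml:"force_path_style,omitempty"`
}

func main() {
	src := []byte("might_gzip: false\nuse_unsigned_payload: false\n")
	var q quirks
	if err := yaml.Unmarshal(src, &q); err != nil {
		panic(err)
	}
	fmt.Println(*q.MightGzip)          // false (explicitly set)
	fmt.Println(*q.UseUnsignedPayload) // false (explicitly set)
	fmt.Println(q.ForcePathStyle)      // <nil> ("don't care" - defaults apply)
}
```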
@@ -137,4 +137,3 @@ use_accelerate_endpoint: true

 quirks:
   might_gzip: false # Never auto gzips objects
   use_unsigned_payload: false # AWS has trailer support which means it adds checksums in the trailer without seeking
-  use_data_integrity_protections: true

@@ -20,21 +20,20 @@ var NewYamlMap = orderedmap.New[string, string]

 // Quirks defines all the S3 provider quirks
 type Quirks struct {
     ListVersion                 *int   `yaml:"list_version,omitempty"` // 1 or 2
     ForcePathStyle              *bool  `yaml:"force_path_style,omitempty"` // true = path-style
     ListURLEncode               *bool  `yaml:"list_url_encode,omitempty"`
     UseMultipartEtag            *bool  `yaml:"use_multipart_etag,omitempty"`
     UseAlreadyExists            *bool  `yaml:"use_already_exists,omitempty"`
     UseAcceptEncodingGzip       *bool  `yaml:"use_accept_encoding_gzip,omitempty"`
-    UseDataIntegrityProtections *bool  `yaml:"use_data_integrity_protections,omitempty"`
     MightGzip                   *bool  `yaml:"might_gzip,omitempty"`
     UseMultipartUploads         *bool  `yaml:"use_multipart_uploads,omitempty"`
     UseUnsignedPayload          *bool  `yaml:"use_unsigned_payload,omitempty"`
     UseXID                      *bool  `yaml:"use_x_id,omitempty"`
     SignAcceptEncoding          *bool  `yaml:"sign_accept_encoding,omitempty"`
     CopyCutoff                  *int64 `yaml:"copy_cutoff,omitempty"`
     MaxUploadParts              *int   `yaml:"max_upload_parts,omitempty"`
     MinChunkSize                *int64 `yaml:"min_chunk_size,omitempty"`
 }

 // Provider defines the configurable data in each provider.yaml

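The setQuirks hunk later in this diff applies these quirks with calls like `set(&opt.ListURLEncode, true, provider.Quirks.ListURLEncode)`. One plausible shape for that helper, assuming fs.Tristate's exported Value/Valid fields; a sketch, not necessarily rclone's exact implementation:

```go
package s3sketch

import "github.com/rclone/rclone/fs"

// set resolves one quirk: if the provider expressed an opinion (non-nil
// quirk) use it, otherwise fall back to the given default. Options the
// user already configured explicitly (opt.Valid true) are left alone.
func set(opt *fs.Tristate, def bool, quirk *bool) {
	if opt.Valid {
		return
	}
	opt.Valid = true
	if quirk != nil {
		opt.Value = *quirk
	} else {
		opt.Value = def
	}
}
```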
387
backend/s3/s3.go

@@ -39,9 +39,6 @@ import (
     smithyhttp "github.com/aws/smithy-go/transport/http"
     "github.com/ncw/swift/v2"

-    "golang.org/x/net/http/httpguts"
-    "golang.org/x/sync/errgroup"
-
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/accounting"
     "github.com/rclone/rclone/fs/chunksize"

@@ -62,6 +59,8 @@ import (
     "github.com/rclone/rclone/lib/readers"
     "github.com/rclone/rclone/lib/rest"
     "github.com/rclone/rclone/lib/version"
+    "golang.org/x/net/http/httpguts"
+    "golang.org/x/sync/errgroup"
 )

 // Register with Fs

@@ -575,13 +574,6 @@ circumstances or for testing.
 `,
             Default:  false,
             Advanced: true,
-        }, {
-            Name: "use_data_integrity_protections",
-            Help: `If true use AWS S3 data integrity protections.
-
-See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html)`,
-            Default:  fs.Tristate{},
-            Advanced: true,
         }, {
             Name: "versions",
             Help: "Include old versions in directory listings.",

@@ -900,68 +892,67 @@ var systemMetadataInfo = map[string]fs.MetadataHelp{

 // Options defines the configuration for this backend
 type Options struct {
     Provider                    string               `config:"provider"`
     EnvAuth                     bool                 `config:"env_auth"`
     AccessKeyID                 string               `config:"access_key_id"`
     SecretAccessKey             string               `config:"secret_access_key"`
     Region                      string               `config:"region"`
     Endpoint                    string               `config:"endpoint"`
     STSEndpoint                 string               `config:"sts_endpoint"`
     UseDualStack                bool                 `config:"use_dual_stack"`
     LocationConstraint          string               `config:"location_constraint"`
     ACL                         string               `config:"acl"`
     BucketACL                   string               `config:"bucket_acl"`
     RequesterPays               bool                 `config:"requester_pays"`
     ServerSideEncryption        string               `config:"server_side_encryption"`
     SSEKMSKeyID                 string               `config:"sse_kms_key_id"`
     SSECustomerAlgorithm        string               `config:"sse_customer_algorithm"`
     SSECustomerKey              string               `config:"sse_customer_key"`
     SSECustomerKeyBase64        string               `config:"sse_customer_key_base64"`
     SSECustomerKeyMD5           string               `config:"sse_customer_key_md5"`
     StorageClass                string               `config:"storage_class"`
     UploadCutoff                fs.SizeSuffix        `config:"upload_cutoff"`
     CopyCutoff                  fs.SizeSuffix        `config:"copy_cutoff"`
     ChunkSize                   fs.SizeSuffix        `config:"chunk_size"`
     MaxUploadParts              int                  `config:"max_upload_parts"`
     DisableChecksum             bool                 `config:"disable_checksum"`
     SharedCredentialsFile       string               `config:"shared_credentials_file"`
     Profile                     string               `config:"profile"`
     SessionToken                string               `config:"session_token"`
     UploadConcurrency           int                  `config:"upload_concurrency"`
     ForcePathStyle              bool                 `config:"force_path_style"`
     V2Auth                      bool                 `config:"v2_auth"`
     UseAccelerateEndpoint       bool                 `config:"use_accelerate_endpoint"`
     UseARNRegion                bool                 `config:"use_arn_region"`
     LeavePartsOnError           bool                 `config:"leave_parts_on_error"`
     ListChunk                   int32                `config:"list_chunk"`
     ListVersion                 int                  `config:"list_version"`
     ListURLEncode               fs.Tristate          `config:"list_url_encode"`
     NoCheckBucket               bool                 `config:"no_check_bucket"`
     NoHead                      bool                 `config:"no_head"`
     NoHeadObject                bool                 `config:"no_head_object"`
     Enc                         encoder.MultiEncoder `config:"encoding"`
     DisableHTTP2                bool                 `config:"disable_http2"`
     DownloadURL                 string               `config:"download_url"`
     DirectoryMarkers            bool                 `config:"directory_markers"`
     UseMultipartEtag            fs.Tristate          `config:"use_multipart_etag"`
     UsePresignedRequest         bool                 `config:"use_presigned_request"`
-    UseDataIntegrityProtections fs.Tristate          `config:"use_data_integrity_protections"`
     Versions                    bool                 `config:"versions"`
     VersionAt                   fs.Time              `config:"version_at"`
     VersionDeleted              bool                 `config:"version_deleted"`
     Decompress                  bool                 `config:"decompress"`
     MightGzip                   fs.Tristate          `config:"might_gzip"`
     UseAcceptEncodingGzip       fs.Tristate          `config:"use_accept_encoding_gzip"`
     NoSystemMetadata            bool                 `config:"no_system_metadata"`
     UseAlreadyExists            fs.Tristate          `config:"use_already_exists"`
     UseMultipartUploads         fs.Tristate          `config:"use_multipart_uploads"`
     UseUnsignedPayload          fs.Tristate          `config:"use_unsigned_payload"`
     SDKLogMode                  sdkLogMode           `config:"sdk_log_mode"`
     DirectoryBucket             bool                 `config:"directory_bucket"`
     IBMAPIKey                   string               `config:"ibm_api_key"`
     IBMInstanceID               string               `config:"ibm_resource_instance_id"`
     UseXID                      fs.Tristate          `config:"use_x_id"`
     SignAcceptEncoding          fs.Tristate          `config:"sign_accept_encoding"`
 }

 // Fs represents a remote s3 server

@@ -1311,10 +1302,6 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
         } else {
             s3Opt.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled
         }
-        if !opt.UseDataIntegrityProtections.Value {
-            s3Opt.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
-            s3Opt.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired
-        }
         // FIXME not ported from SDK v1 - not sure what this does
         // s3Opt.UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
     })

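For reference, the removed block above is the aws-sdk-go-v2 mechanism for checksumming only when an operation requires it, instead of the SDK default of checksumming every request and response. Roughly like this when building a client directly (option and constant names as used in the removed lines and the AWS data-integrity docs linked in the removed config help):

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func newClient(ctx context.Context) (*s3.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		// Only checksum when the operation requires it, rather than
		// on every request/response (the SDK's newer default).
		o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
		o.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired
	}), nil
}
```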
@@ -1510,7 +1497,6 @@ func setQuirks(opt *Options, provider *Provider) {
     set(&opt.ListURLEncode, true, provider.Quirks.ListURLEncode)
     set(&opt.UseMultipartEtag, true, provider.Quirks.UseMultipartEtag)
     set(&opt.UseAcceptEncodingGzip, true, provider.Quirks.UseAcceptEncodingGzip)
-    set(&opt.UseDataIntegrityProtections, false, provider.Quirks.UseDataIntegrityProtections)
     set(&opt.MightGzip, true, provider.Quirks.MightGzip)
     set(&opt.UseAlreadyExists, true, provider.Quirks.UseAlreadyExists)
     set(&opt.UseMultipartUploads, true, provider.Quirks.UseMultipartUploads)

@@ -2916,118 +2902,101 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,

 var commandHelp = []fs.CommandHelp{{
     Name:  "restore",
-    Short: "Restore objects from GLACIER or INTELLIGENT-TIERING archive tier.",
-    Long: `This command can be used to restore one or more objects from GLACIER to normal
-storage or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier
-to the Frequent Access tier.
+    Short: "Restore objects from GLACIER or INTELLIGENT-TIERING archive tier",
+    Long: `This command can be used to restore one or more objects from GLACIER to normal storage
+or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier.

-Usage examples:
+Usage Examples:

-` + "```console" + `
-rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
-rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
-rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
-rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
-` + "```" + `
+rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
+rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY

-This flag also obeys the filters. Test first with --interactive/-i or --dry-run
-flags.
+This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags

-` + "```console" + `
-rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
-` + "```" + `
+rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1

-All the objects shown will be marked for restore, then:
+All the objects shown will be marked for restore, then

-` + "```console" + `
-rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
-` + "```" + `
+rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1

 It returns a list of status dictionaries with Remote and Status
 keys. The Status will be OK if it was successful or an error message
 if not.

-` + "```json" + `
-[
-  {
-    "Status": "OK",
-    "Remote": "test.txt"
-  },
-  {
-    "Status": "OK",
-    "Remote": "test/file4.txt"
-  }
-]
-` + "```",
+[
+  {
+    "Status": "OK",
+    "Remote": "test.txt"
+  },
+  {
+    "Status": "OK",
+    "Remote": "test/file4.txt"
+  }
+]
+`,
     Opts: map[string]string{
         "priority": "Priority of restore: Standard|Expedited|Bulk",
-        "lifetime": `Lifetime of the active copy in days, ignored for INTELLIGENT-TIERING
-storage.`,
+        "lifetime":    "Lifetime of the active copy in days, ignored for INTELLIGENT-TIERING storage",
         "description": "The optional description for the job.",
     },
 }, {
     Name:  "restore-status",
-    Short: "Show the status for objects being restored from GLACIER or INTELLIGENT-TIERING.",
-    Long: `This command can be used to show the status for objects being restored from
-GLACIER to normal storage or from INTELLIGENT-TIERING Archive Access / Deep
-Archive Access tier to the Frequent Access tier.
+    Short: "Show the restore status for objects being restored from GLACIER or INTELLIGENT-TIERING storage",
+    Long: `This command can be used to show the status for objects being restored from GLACIER to normal storage
+or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier.

-Usage examples:
+Usage Examples:

-` + "```console" + `
-rclone backend restore-status s3:bucket/path/to/object
-rclone backend restore-status s3:bucket/path/to/directory
-rclone backend restore-status -o all s3:bucket/path/to/directory
-` + "```" + `
+rclone backend restore-status s3:bucket/path/to/object
+rclone backend restore-status s3:bucket/path/to/directory
+rclone backend restore-status -o all s3:bucket/path/to/directory

 This command does not obey the filters.

-It returns a list of status dictionaries:
+It returns a list of status dictionaries.

-` + "```json" + `
-[
-  {
-    "Remote": "file.txt",
-    "VersionID": null,
-    "RestoreStatus": {
-      "IsRestoreInProgress": true,
-      "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
-    },
-    "StorageClass": "GLACIER"
-  },
-  {
-    "Remote": "test.pdf",
-    "VersionID": null,
-    "RestoreStatus": {
-      "IsRestoreInProgress": false,
-      "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
-    },
-    "StorageClass": "DEEP_ARCHIVE"
-  },
-  {
-    "Remote": "test.gz",
-    "VersionID": null,
-    "RestoreStatus": {
-      "IsRestoreInProgress": true,
-      "RestoreExpiryDate": "null"
-    },
-    "StorageClass": "INTELLIGENT_TIERING"
-  }
-]
-` + "```",
+[
+  {
+    "Remote": "file.txt",
+    "VersionID": null,
+    "RestoreStatus": {
+      "IsRestoreInProgress": true,
+      "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
+    },
+    "StorageClass": "GLACIER"
+  },
+  {
+    "Remote": "test.pdf",
+    "VersionID": null,
+    "RestoreStatus": {
+      "IsRestoreInProgress": false,
+      "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
+    },
+    "StorageClass": "DEEP_ARCHIVE"
+  },
+  {
+    "Remote": "test.gz",
+    "VersionID": null,
+    "RestoreStatus": {
+      "IsRestoreInProgress": true,
+      "RestoreExpiryDate": "null"
+    },
+    "StorageClass": "INTELLIGENT_TIERING"
+  }
+]
+`,
     Opts: map[string]string{
-        "all": "If set then show all objects, not just ones with restore status.",
+        "all": "if set then show all objects, not just ones with restore status",
     },
 }, {
     Name:  "list-multipart-uploads",
-    Short: "List the unfinished multipart uploads.",
+    Short: "List the unfinished multipart uploads",
     Long: `This command lists the unfinished multipart uploads in JSON format.

-Usage examples:
-
-` + "```console" + `
-rclone backend list-multipart s3:bucket/path/to/object
-` + "```" + `
+rclone backend list-multipart s3:bucket/path/to/object

 It returns a dictionary of buckets with values as lists of unfinished
 multipart uploads.

@@ -3035,47 +3004,44 @@ multipart uploads.

 You can call it with no bucket in which case it lists all bucket, with
 a bucket or with a bucket and path.

-` + "```json" + `
-{
-  "rclone": [
+{
+  "rclone": [
     {
       "Initiated": "2020-06-26T14:20:36Z",
       "Initiator": {
         "DisplayName": "XXX",
         "ID": "arn:aws:iam::XXX:user/XXX"
       },
       "Key": "KEY",
       "Owner": {
         "DisplayName": null,
         "ID": "XXX"
       },
       "StorageClass": "STANDARD",
       "UploadId": "XXX"
     }
   ],
   "rclone-1000files": [],
   "rclone-dst": []
 }
-` + "```",
+`,
 }, {
     Name:  "cleanup",
     Short: "Remove unfinished multipart uploads.",
     Long: `This command removes unfinished multipart uploads of age greater than
 max-age which defaults to 24 hours.

-Note that you can use --interactive/-i or --dry-run with this command to see
-what it would do.
+Note that you can use --interactive/-i or --dry-run with this command to see what
+it would do.

-Usage examples:
+rclone backend cleanup s3:bucket/path/to/object
+rclone backend cleanup -o max-age=7w s3:bucket/path/to/object

-` + "```console" + `
-rclone backend cleanup s3:bucket/path/to/object
-rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
-` + "```" + `
-
-Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
+Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
+`,
     Opts: map[string]string{
-        "max-age": "Max age of upload to delete.",
+        "max-age": "Max age of upload to delete",
     },
 }, {
     Name: "cleanup-hidden",

@@ -3083,14 +3049,11 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.`,
     Long: `This command removes any old hidden versions of files
 on a versions enabled bucket.

-Note that you can use --interactive/-i or --dry-run with this command to see
-what it would do.
+Note that you can use --interactive/-i or --dry-run with this command to see what
+it would do.

-Usage example:
-
-` + "```console" + `
-rclone backend cleanup-hidden s3:bucket/path/to/dir
-` + "```",
+rclone backend cleanup-hidden s3:bucket/path/to/dir
+`,
 }, {
     Name:  "versioning",
     Short: "Set/get versioning support for a bucket.",

@@ -3098,29 +3061,24 @@ rclone backend cleanup-hidden s3:bucket/path/to/dir
 passed and then returns the current versioning status for the bucket
 supplied.

-Usage examples:
+rclone backend versioning s3:bucket # read status only
+rclone backend versioning s3:bucket Enabled
+rclone backend versioning s3:bucket Suspended

-` + "```console" + `
-rclone backend versioning s3:bucket # read status only
-rclone backend versioning s3:bucket Enabled
-rclone backend versioning s3:bucket Suspended
-` + "```" + `
-
-It may return "Enabled", "Suspended" or "Unversioned". Note that once
-versioning has been enabled the status can't be set back to "Unversioned".`,
+It may return "Enabled", "Suspended" or "Unversioned". Note that once versioning
+has been enabled the status can't be set back to "Unversioned".
+`,
 }, {
     Name:  "set",
     Short: "Set command for updating the config parameters.",
     Long: `This set command can be used to update the config parameters
 for a running s3 backend.

-Usage examples:
+Usage Examples:

-` + "```console" + `
-rclone backend set s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-rclone rc backend/command command=set fs=s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
-rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id=X -o secret_access_key=X
-` + "```" + `
+rclone backend set s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
+rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id=X -o secret_access_key=X

 The option keys are named as they are in the config file.

@@ -3128,7 +3086,8 @@ This rebuilds the connection to the s3 backend when it is called with
 the new parameters. Only new parameters need be passed as the values
 will default to those currently in use.

-It doesn't return anything.`,
+It doesn't return anything.
+`,
 }}

 // Command the backend to run a named command

@@ -4552,10 +4511,6 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
             ui.req.ContentLanguage = aws.String(value)
         case "content-type":
             ui.req.ContentType = aws.String(value)
-        case "if-match":
-            ui.req.IfMatch = aws.String(value)
-        case "if-none-match":
-            ui.req.IfNoneMatch = aws.String(value)
         case "x-amz-tagging":
             ui.req.Tagging = aws.String(value)
         default:

@@ -801,7 +801,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
     req := &api.GetDownloadLinkRequest{
         Slug:      o.slug,
         UserLogin: o.fs.opt.Username,
-        DeviceID:  fmt.Sprintf("%d", time.Now().UnixNano()),
+        // Has to be set but doesn't seem to be used server side.
+        DeviceID: "foobar",
     }

     var resp *api.GetDownloadLinkResponse

@@ -814,26 +815,16 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
         return nil, err
     }

-    downloadURL := resp.Link
-    if resp.Hash != "" {
-        if strings.Contains(downloadURL, "?") {
-            downloadURL += "&"
-        } else {
-            downloadURL += "?"
-        }
-        downloadURL += "hash=" + url.QueryEscape(resp.Hash)
-    }
-
     opts = rest.Opts{
         Method:  "GET",
-        RootURL: downloadURL,
+        RootURL: resp.Link,
         Options: options,
     }

     var httpResp *http.Response

     err = o.fs.pacer.Call(func() (bool, error) {
-        httpResp, err = o.fs.rest.Call(ctx, &opts)
+        httpResp, err = o.fs.cdn.Call(ctx, &opts)
         return o.fs.shouldRetry(ctx, httpResp, err, true)
     })
     if err != nil {

|||||||
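The removed lines splice a `hash` parameter onto the link by checking for a `?` by hand. An equivalent, somewhat more robust formulation (a sketch, not rclone code) lets net/url pick the separator and do the escaping:

```go
import "net/url"

// Sketch: append hash=... to a download link whether or not it
// already carries a query string.
func withHash(link, hash string) (string, error) {
	u, err := url.Parse(link)
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("hash", hash) // url.Values handles the query escaping
	u.RawQuery = q.Encode()
	return u.String(), nil
}
```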
@@ -136,7 +136,7 @@ func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenO
 	size, err := u.upload.stream.Read(data)
 
 	if err != nil {
-		fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data stream: %v", cnt, err)
+		fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data strem: %v", cnt, err)
 		return err
 	}
 
@@ -961,7 +961,7 @@ func (o *Object) setMetaData(info *api.ResourceInfoResponse) (err error) {
 	return nil
 }
 
-// readMetaData reads and sets the new metadata for a storage.Object
+// readMetaData reads ands sets the new metadata for a storage.Object
 func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.hasMetaData {
 		return nil
@@ -51,9 +51,9 @@ def find_regions(lines):
     regions = []
     start = None
     for i, line in enumerate(lines, 1):
-        if line.lstrip().startswith("<!-- autogenerated options start "):
+        if "rem autogenerated options start" in line:
             start = i
-        elif start is not None and line.rstrip().endswith(" autogenerated options stop -->"):
+        elif "rem autogenerated options stop" in line and start is not None:
             regions.append((start, i))
             start = None
     return regions
@@ -9,12 +9,10 @@ import io
 import subprocess
 from pathlib import Path
 
-begin = "<!-- "
-end = " -->"
-marker = "autogenerated options"
-line_marker_start_prefix = begin + marker + " start "
-line_marker_stop = begin + marker + " stop" + end
-markdownlint_disable = begin + "markdownlint-disable-line line-length" + end
+marker = "{{< rem autogenerated options"
+start = marker + " start"
+stop = marker + " stop"
+end = ">}}"
 
 def find_backends():
     """Return a list of all backends"""
@@ -29,7 +27,7 @@ def output_backend_tool_docs(backend, out, cwd):
     """Output documentation for backend tool to out"""
     out.flush()
     subprocess.call(["./rclone", "--config=/notfound", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
 
 def alter_doc(backend):
     """Alter the documentation for backend"""
     rclone_bin_dir = Path(sys.path[0]).parent.absolute()
@@ -45,23 +43,23 @@ def alter_doc(backend):
     in_docs = False
     for line in in_file:
         if not in_docs:
-            if line.lstrip().startswith(line_marker_start_prefix):
+            if start in line:
                 in_docs = True
-                line_marker_start = (line_marker_start_prefix + "- DO NOT EDIT - instead edit fs.RegInfo in backend/%s/%s.go and run make backenddocs to verify" + end) % (backend, backend)
-                out_file.write(line_marker_start + " " + markdownlint_disable + "\n")
+                start_full = (start + "\" - DO NOT EDIT - instead edit fs.RegInfo in backend/%s/%s.go then run make backenddocs\" " + end + "\n") % (backend, backend)
+                out_file.write(start_full)
                 output_docs(backend, out_file, rclone_bin_dir)
                 output_backend_tool_docs(backend, out_file, rclone_bin_dir)
-                out_file.write(line_marker_stop + "\n")
+                out_file.write(stop+" "+end+"\n")
                 altered = True
         if not in_docs:
             out_file.write(line)
         if in_docs:
-            if line.strip() == line_marker_stop:
+            if stop in line:
                 in_docs = False
     os.rename(doc_file, doc_file+"~")
     os.rename(new_file, doc_file)
     if not altered:
-        raise ValueError("Didn't find '%s' markers in %s" % (line_marker_start_prefix, doc_file))
+        raise ValueError("Didn't find '%s' markers for in %s" % (start, doc_file))
 
 
 def main(args):
@@ -152,7 +152,7 @@ def read_doc(doc):
     # Make [...](/links/) absolute
     contents = re.sub(r'\]\((\/.*?\/(#.*)?)\)', r"](https://rclone.org\1)", contents)
     # Add additional links on the front page
-    contents = re.sub(r'<!-- MAINPAGELINK -->', "- [Donate.](https://rclone.org/donate/)", contents)
+    contents = re.sub(r'\{\{< rem MAINPAGELINK >\}\}', "- [Donate.](https://rclone.org/donate/)", contents)
     # Interpret provider shortcode
     # {{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
     contents = re.sub(r'\{\{<\s*provider.*?name="(.*?)".*?>\}\}', r"- \1", contents)
@@ -15,8 +15,8 @@ fusermount -u -z /tmp/rclone/rc_mount > /dev/null 2>&1 || umount /tmp/rclone/rc_
 
 awk '
 BEGIN {p=1}
-/^<!-- autogenerated start/ {print;system("cat /tmp/rclone/z.md");p=0}
-/^<!-- autogenerated stop/ {p=1}
+/^\{\{< rem autogenerated start/ {print;system("cat /tmp/rclone/z.md");p=0}
+/^\{\{< rem autogenerated stop/ {p=1}
 p' docs/content/rc.md > /tmp/rclone/rc.md
 
 mv /tmp/rclone/rc.md docs/content/rc.md
@@ -1,6 +1,6 @@
 //go:build !plan9
 
-// Package list implements 'rclone archive list'
+// Package list inplements 'rclone archive list'
 package list
 
 import (
@@ -37,7 +37,7 @@ see the backend docs for definitions.
 
 You can discover what commands a backend implements by using
 
-` + "```console" + `
+` + "```sh" + `
 rclone backend help remote:
 rclone backend help <backendname>
 ` + "```" + `
@@ -46,19 +46,19 @@ You can also discover information about the backend using (see
 [operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
 for more info).
 
-` + "```console" + `
+` + "```sh" + `
 rclone backend features remote:
 ` + "```" + `
 
 Pass options to the backend command with -o. This should be key=value or key, e.g.:
 
-` + "```console" + `
+` + "```sh" + `
 rclone backend stats remote:path stats -o format=json -o long
 ` + "```" + `
 
 Pass arguments to the backend by placing them on the end of the line
 
-` + "```console" + `
+` + "```sh" + `
 rclone backend cleanup remote:path file1 file2 file3
 ` + "```" + `
 
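The help text above says `-o` takes `key=value` or a bare `key`. A sketch of what that means on the receiving side: the pairs arrive in the `opt` map handed to the backend's Command method, with a bare key mapping to an empty string. The option names below are illustrative only:

```go
// Sketch: reading -o options inside a backend command.
// `-o format=json -o long` arrives as {"format": "json", "long": ""}.
format := opt["format"] // "json"
_, long := opt["long"]  // true when the bare key was given
if format == "json" && long {
	// emit the long JSON listing
}
```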
@@ -156,11 +156,9 @@ func showHelp(fsInfo *fs.RegInfo) error {
 	fmt.Printf("## Backend commands\n\n")
 	fmt.Printf(`Here are the commands specific to the %s backend.
 
-Run them with:
+Run them with
 
-`+"```console"+`
-rclone backend COMMAND remote:
-`+"```"+`
+    rclone backend COMMAND remote:
 
 The help below will explain what arguments each command takes.
 
@@ -174,7 +172,7 @@ These can be run on a running backend using the rc command
 	for _, cmd := range cmds {
 		fmt.Printf("### %s\n\n", cmd.Name)
 		fmt.Printf("%s\n\n", cmd.Short)
-		fmt.Printf("```console\nrclone backend %s remote: [options] [<arguments>+]\n```\n\n", cmd.Name)
+		fmt.Printf("    rclone backend %s remote: [options] [<arguments>+]\n\n", cmd.Name)
 		if cmd.Long != "" {
 			fmt.Printf("%s\n\n", cmd.Long)
 		}
@@ -125,12 +125,12 @@ func (b *bisyncRun) ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object)
 }
 
 // DownloadCheckFn is a slightly modified version of Check with --download
-func DownloadCheckFn(ctx context.Context, dst, src fs.Object) (equal bool, noHash bool, err error) {
-	equal, err = operations.CheckIdenticalDownload(ctx, src, dst)
+func DownloadCheckFn(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error) {
+	differ, err = operations.CheckIdenticalDownload(ctx, a, b)
 	if err != nil {
 		return true, true, fmt.Errorf("failed to download: %w", err)
 	}
-	return !equal, false, nil
+	return differ, false, nil
 }
 
 // check potential conflicts (to avoid renaming if already identical)
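This change is easy to misread: operations.CheckIdenticalDownload reports whether the two objects are identical, while a bisync check function must report whether they differ. The left-hand version makes the inversion explicit; condensed from the lines above:

```go
// CheckIdenticalDownload answers "equal?"; the check function
// answers "differ?", so the result must be negated.
equal, err := operations.CheckIdenticalDownload(ctx, src, dst)
if err != nil {
	return true, true, fmt.Errorf("failed to download: %w", err)
}
return !equal, false, nil // differ = !equal
```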
@@ -35,7 +35,7 @@ name. If the source is a directory then it acts exactly like the
 
 So
 
-` + "```console" + `
+` + "```sh" + `
 rclone copyto src dst
 ` + "```" + `
 
@@ -23,7 +23,7 @@ func init() {
 var commandDefinition = &cobra.Command{
 	Use: "cryptcheck remote:path cryptedremote:path",
 	Short: `Cryptcheck checks the integrity of an encrypted remote.`,
-	Long: `Checks a remote against an [encrypted](/crypt/) remote. This is the equivalent
+	Long: `Checks a remote against a [crypted](/crypt/) remote. This is the equivalent
 of running rclone [check](/commands/rclone_check/), but able to check the
 checksums of the encrypted remote.
 
@@ -37,14 +37,14 @@ checksum of the file it has just encrypted.
 
 Use it like this
 
-` + "```console" + `
+` + "```sh" + `
 rclone cryptcheck /path/to/files encryptedremote:path
 ` + "```" + `
 
 You can use it like this also, but that will involve downloading all
 the files in ` + "`remote:path`" + `.
 
-` + "```console" + `
+` + "```sh" + `
 rclone cryptcheck remote:path encryptedremote:path
 ` + "```" + `
 
@@ -34,7 +34,7 @@ If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file name
 
 use it like this
 
-` + "```console" + `
+` + "```sh" + `
 rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
 rclone cryptdecode --reverse encryptedremote: filename1 filename2
 ` + "```" + `
@@ -68,7 +68,7 @@ Here is an example run.
 
 Before - with duplicates
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsl drive:dupes
 6048320 2016-03-05 16:23:16.798000000 one.txt
 6048320 2016-03-05 16:23:11.775000000 one.txt
@@ -81,7 +81,7 @@ $ rclone lsl drive:dupes
 
 Now the ` + "`dedupe`" + ` session
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone dedupe drive:dupes
 2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
 one.txt: Found 4 files with duplicate names
@@ -111,7 +111,7 @@ two-3.txt: renamed from: two.txt
 
 The result being
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsl drive:dupes
 6048320 2016-03-05 16:23:16.798000000 one.txt
 564374 2016-03-05 16:22:52.118000000 two-1.txt
@@ -135,13 +135,13 @@ or by using an extra parameter with the same value
 For example, to rename all the identically named photos in your Google Photos
 directory, do
 
-` + "```console" + `
+` + "```sh" + `
 rclone dedupe --dedupe-mode rename "drive:Google Photos"
 ` + "```" + `
 
 Or
 
-` + "```console" + `
+` + "```sh" + `
 rclone dedupe rename "drive:Google Photos"
 ` + "```",
 	Annotations: map[string]string{
@@ -20,13 +20,13 @@ var bashCommandDefinition = &cobra.Command{
 
 By default, when run without any arguments,
 
-` + "```console" + `
+` + "```sh" + `
 rclone completion bash
 ` + "```" + `
 
 the generated script will be written to
 
-` + "```console" + `
+` + "```sh" + `
 /etc/bash_completion.d/rclone
 ` + "```" + `
 
@@ -43,7 +43,7 @@ can logout and login again to use the autocompletion script.
 
 Alternatively, you can source the script directly
 
-` + "```console" + `
+` + "```sh" + `
 . /path/to/my_bash_completion_scripts/rclone
 ` + "```" + `
 
@@ -21,14 +21,14 @@ var fishCommandDefinition = &cobra.Command{
 This writes to /etc/fish/completions/rclone.fish by default so will
 probably need to be run with sudo or as root, e.g.
 
-` + "```console" + `
+` + "```sh" + `
 sudo rclone completion fish
 ` + "```" + `
 
 Logout and login again to use the autocompletion scripts, or source
 them directly
 
-` + "```console" + `
+` + "```sh" + `
 . /etc/fish/completions/rclone.fish
 ` + "```" + `
 
@@ -20,7 +20,7 @@ var powershellCommandDefinition = &cobra.Command{
 
 To load completions in your current shell session:
 
-` + "```console" + `
+` + "```sh" + `
 rclone completion powershell | Out-String | Invoke-Expression
 ` + "```" + `
 
@@ -21,14 +21,14 @@ var zshCommandDefinition = &cobra.Command{
 This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
 probably need to be run with sudo or as root, e.g.
 
-` + "```console" + `
+` + "```sh" + `
 sudo rclone completion zsh
 ` + "```" + `
 
 Logout and login again to use the autocompletion scripts, or source
 them directly
 
-` + "```console" + `
+` + "```sh" + `
 autoload -U compinit && compinit
 ` + "```" + `
 
@@ -11,14 +11,11 @@ users.
 name. This symlink helps git-annex tell rclone it wants to run the "gitannex"
 subcommand.
 
-Create the helper symlink in "$HOME/bin":
-
-```console
+```sh
+# Create the helper symlink in "$HOME/bin".
 ln -s "$(realpath rclone)" "$HOME/bin/git-annex-remote-rclone-builtin"
-```
 
-Verify the new symlink is on your PATH:
+# Verify the new symlink is on your PATH.
 
-```console
 which git-annex-remote-rclone-builtin
 ```
 
@@ -30,15 +27,11 @@ users.
 Start by asking git-annex to describe the remote's available configuration
 parameters.
 
-If you skipped step 1:
-
-```console
+```sh
+# If you skipped step 1:
 git annex initremote MyRemote type=rclone --whatelse
-```
 
-If you created a symlink in step 1:
-
-```console
+# If you created a symlink in step 1:
 git annex initremote MyRemote type=external externaltype=rclone-builtin --whatelse
 ```
 
@@ -54,7 +47,7 @@ users.
 be one configured in your rclone.conf file, which can be located with `rclone
 config file`.
 
-```console
+```sh
 git annex initremote MyRemote \
 	type=external \
 	externaltype=rclone-builtin \
@@ -68,7 +61,7 @@ users.
 remote**. This command is very new and has not been tested on many rclone
 backends. Caveat emptor!
 
-```console
+```sh
 git annex testremote MyRemote
 ```
 
@@ -103,13 +103,13 @@ as a relative path).
 
 Run without a hash to see the list of all supported hashes, e.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone hashsum
 ` + hash.HelpString(0) + "```" + `
 
 Then
 
-` + "```console" + `
+` + "```sh" + `
 rclone hashsum MD5 remote:path
 ` + "```" + `
 
@@ -343,12 +343,12 @@ func showBackend(name string) {
 		fmt.Printf("- Examples:\n")
 	}
 	for _, ex := range opt.Examples {
 		fmt.Printf("    - %s\n", quoteString(ex.Value))
 		for line := range strings.SplitSeq(ex.Help, "\n") {
 			fmt.Printf("        - %s\n", line)
 		}
 		if ex.Provider != "" {
 			fmt.Printf("        - Provider: %s\n", ex.Provider)
 		}
 	}
 }
@@ -29,7 +29,7 @@ var commandDefinition = &cobra.Command{
 	Short: `Generate public link to file/folder.`,
 	Long: `Create, retrieve or remove a public link to the given file or folder.
 
-` + "```console" + `
+` + "```sh" + `
 rclone link remote:path/to/file
 rclone link remote:path/to/folder/
 rclone link --unlink remote:path/to/folder/
@@ -23,7 +23,7 @@ readable format with size and path. Recurses by default.
 
 E.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone ls swift:bucket
 60295 bevajer5jef
 90613 canole
@@ -34,7 +34,7 @@ not), the modification time (if known, the current time if not), the
 number of objects in the directory (if known, -1 if not) and the name
 of the directory, E.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsd swift:
 494000 2018-04-26 08:43:20 10000 10000files
 65 2018-04-26 08:43:20 1 1File
@@ -42,7 +42,7 @@ $ rclone lsd swift:
 
 Or
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsd drive:test
 -1 2016-10-17 17:41:53 -1 1000files
 -1 2017-01-03 14:40:54 -1 2500files
@@ -54,7 +54,7 @@ one per line. The directories will have a / suffix.
 
 E.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsf swift:bucket
 bevajer5jef
 canole
@@ -85,7 +85,7 @@ So if you wanted the path, size and modification time, you would use
 
 E.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsf --format "tsp" swift:bucket
 2016-06-25 18:55:41;60295;bevajer5jef
 2016-06-25 18:55:43;90613;canole
@@ -103,13 +103,13 @@ type.
 
 For example, to emulate the md5sum command you can use
 
-` + "```console" + `
+` + "```sh" + `
 rclone lsf -R --hash MD5 --format hp --separator "  " --files-only .
 ` + "```" + `
 
 E.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsf -R --hash MD5 --format hp --separator "  " --files-only swift:bucket
 7908e352297f0f530b84a756f188baa3 bevajer5jef
 cd65ac234e6fea5925974a51cdd865cc canole
@@ -126,7 +126,7 @@ putting it last is a good strategy.
 
 E.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsf --separator "," --format "tshp" swift:bucket
 2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
 2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
@@ -140,7 +140,7 @@ if they contain,
 
 E.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsf --csv --files-only --format ps remote:path
 test.log,22355
 test.sh,449
@@ -153,7 +153,7 @@ to pass to an rclone copy with the ` + "`--files-from-raw`" + ` flag.
 For example, to find all the files modified within one day and copy
 those only (without traversing the whole directory structure):
 
-` + "```console" + `
+` + "```sh" + `
 rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
 rclone copy --files-from-raw new_files /path/to/local remote:path
 ` + "```" + `
@@ -162,7 +162,7 @@ The default time format is ` + "`'2006-01-02 15:04:05'`" + `.
 [Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with
 the ` + "`--time-format`" + ` flag. Examples:
 
-` + "```console" + `
+` + "```sh" + `
 rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
 rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
 rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
@@ -23,7 +23,7 @@ readable format with modification time, size and path. Recurses by default.
 
 E.g.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone lsl swift:bucket
 60295 2016-06-25 18:55:41.062626927 bevajer5jef
 90613 2016-06-25 18:55:43.302607074 canole
@@ -16,7 +16,7 @@ mount, waits until success or timeout and exits with appropriate code
 On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount`
 is an **empty** **existing** directory:
 
-```console
+```sh
 rclone @ remote:path/to/files /path/to/local/mount
 ```
 
@@ -32,7 +32,7 @@ and is not supported when [mounting as a network drive](#mounting-modes-on-windo
 and the last example will mount as network share `\\cloud\remote` and map it to an
 automatically assigned drive:
 
-```console
+```sh
 rclone @ remote:path/to/files *
 rclone @ remote:path/to/files X:
 rclone @ remote:path/to/files C:\path\parent\mount
@@ -44,7 +44,7 @@ a SIGINT or SIGTERM signal, the mount should be automatically stopped.
 
 When running in background mode the user will have to stop the mount manually:
 
-```console
+```sh
 # Linux
 fusermount -u /path/to/local/mount
 #... or on some systems
@@ -65,7 +65,7 @@ at all, then 1 PiB is set as both the total and the free size.
 
 ### Installing on Windows
 
-To run `rclone @ on Windows`, you will need to
+To run rclone @ on Windows, you will need to
 download and install [WinFsp](http://www.secfs.net/winfsp/).
 
 [WinFsp](https://github.com/winfsp/winfsp) is an open-source
@@ -96,7 +96,7 @@ directory or drive. Using the special value `*` will tell rclone to
 automatically assign the next available drive letter, starting with Z: and moving
 backward. Examples:
 
-```console
+```sh
 rclone @ remote:path/to/files *
 rclone @ remote:path/to/files X:
 rclone @ remote:path/to/files C:\path\parent\mount
@@ -111,7 +111,7 @@ to your @ command. Mounting to a directory path is not supported in
 this mode, it is a limitation Windows imposes on junctions, so the remote must always
 be mounted to a drive letter.
 
-```console
+```sh
 rclone @ remote:path/to/files X: --network-mode
 ```
 
@@ -129,7 +129,7 @@ volume label for the mapped drive, shown in Windows Explorer etc, while the comp
 If you specify a full network share UNC path with `--volname`, this will implicitly
 set the `--network-mode` option, so the following two examples have same result:
 
-```console
+```sh
 rclone @ remote:path/to/files X: --network-mode
 rclone @ remote:path/to/files X: --volname \\server\share
 ```
@@ -140,7 +140,7 @@ mountpoint, and instead use the UNC path specified as the volume name, as if it
 specified with the `--volname` option. This will also implicitly set
 the `--network-mode` option. This means the following two examples have same result:
 
-```console
+```sh
 rclone @ remote:path/to/files \\cloud\remote
 rclone @ remote:path/to/files * --volname \\cloud\remote
 ```
@@ -296,7 +296,7 @@ from the website, rclone will locate the macFUSE libraries without any further i
 If however, macFUSE is installed using the [macports](https://www.macports.org/)
 package manager, the following addition steps are required.
 
-```console
+```sh
 sudo mkdir /usr/local/lib
 cd /usr/local/lib
 sudo ln -s /opt/local/lib/libfuse.2.dylib
@@ -324,17 +324,6 @@ full new copy of the file.
 When mounting with `--read-only`, attempts to write to files will fail *silently*
 as opposed to with a clear warning as in macFUSE.
 
-## Mounting on Linux
-
-On newer versions of Ubuntu, you may encounter the following error when running
-`rclone mount`:
-
-> NOTICE: mount helper error: fusermount3: mount failed: Permission denied
-> CRITICAL: Fatal error: failed to mount FUSE fs: fusermount: exit status 1
-
-This may be due to newer [Apparmor](https://wiki.ubuntu.com/AppArmor) restrictions,
-which can be disabled with `sudo aa-disable /usr/bin/fusermount3` (you may need to
-`sudo apt install apparmor-utils` beforehand).
-
 ### Limitations
 
 Without the use of `--vfs-cache-mode` this can only write files
@@ -435,7 +424,7 @@ rclone will detect it and translate command-line arguments appropriately.
 
 Now you can run classic mounts like this:
 
-```console
+```sh
 mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
 ```
 
@@ -467,7 +456,7 @@ WantedBy=multi-user.target
 
 or add in `/etc/fstab` a line like
 
-```console
+```sh
 sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
 ```
 
@@ -65,7 +65,7 @@ This takes the following parameters:
 
 Example:
 
-` + "```console" + `
+` + "```sh" + `
 rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
 rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
 rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
@@ -74,7 +74,7 @@ rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}
 The vfsOpt are as described in options/get and can be seen in the the
 "vfs" section when running and the mountOpt can be seen in the "mount" section:
 
-` + "```console" + `
+` + "```sh" + `
 rclone rc options/get
 ` + "```" + `
 `,
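The `rclone rc mount/mount` invocations above can equally be driven over plain HTTP, since the rc interface is a JSON-over-POST API. A minimal sketch, assuming a daemon started with `rclone rcd` listening on the default localhost:5572:

```go
package main

import (
	"log"
	"net/http"
	"strings"
)

func main() {
	// Mirrors the first CLI example above; the JSON keys match the
	// rc parameters (fs, mountPoint, and optionally vfsOpt/mountOpt).
	body := strings.NewReader(`{"fs": "mydrive:", "mountPoint": "/home/user/mountPoint"}`)
	resp, err := http.Post("http://localhost:5572/mount/mount", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close() // 200 OK means the mount was created
	log.Println("status:", resp.Status)
}
```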
@@ -35,7 +35,7 @@ like the [move](/commands/rclone_move/) command.
 
 So
 
-` + "```console" + `
+` + "```sh" + `
 rclone moveto src dst
 ` + "```" + `
 
@@ -33,7 +33,7 @@ This command can also accept a password through STDIN instead of an
 argument by passing a hyphen as an argument. This will use the first
 line of STDIN as the password not including the trailing newline.
 
-` + "```console" + `
+` + "```sh" + `
 echo "secretpassword" | rclone obscure -
 ` + "```" + `
 
@@ -28,7 +28,7 @@ var commandDefinition = &cobra.Command{
 	Short: `Copies standard input to file on remote.`,
 	Long: `Reads from standard input (stdin) and copies it to a single remote file.
 
-` + "```console" + `
+` + "```sh" + `
 echo "hello world" | rclone rcat remote:path/to/file
 ffmpeg - | rclone rcat remote:path/to/file
 ` + "```" + `
@@ -9,7 +9,7 @@ Docker plugins can run as a managed plugin under control of the docker daemon
 or as an independent native service. For testing, you can just run it directly
 from the command line, for example:
 
-```console
+```sh
 sudo rclone serve docker --base-dir /tmp/rclone-volumes --socket-addr localhost:8787 -vv
 ```
 
@@ -119,7 +119,7 @@ following instructions.
 
 Now start the rclone restic server
 
-` + "```console" + `
+` + "```sh" + `
 rclone serve restic -v remote:backup
 ` + "```" + `
 
@@ -149,7 +149,7 @@ the URL for the REST server.
 
 For example:
 
-` + "```console" + `
+` + "```sh" + `
 $ export RESTIC_REPOSITORY=rest:http://localhost:8080/
 $ export RESTIC_PASSWORD=yourpassword
 $ restic init
@@ -173,7 +173,7 @@ Note that you can use the endpoint to host multiple repositories. Do
 this by adding a directory name or path after the URL. Note that
 these **must** end with /. Eg
 
-` + "```console" + `
+` + "```sh" + `
 $ export RESTIC_REPOSITORY=rest:http://localhost:8080/user1repo/
 # backup user1 stuff
 $ export RESTIC_REPOSITORY=rest:http://localhost:8080/user2repo/
@@ -33,14 +33,14 @@ cause problems for S3 clients which rely on the Etag being the MD5.
 For a simple set up, to serve `remote:path` over s3, run the server
 like this:
 
-```console
+```sh
 rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
 ```
 
 For example, to use a simple folder in the filesystem, run the server
 with a command like this:
 
-```console
+```sh
 rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY local:/path/to/folder
 ```
 
@@ -19,19 +19,11 @@ var Command = &cobra.Command{
 	Long: `Serve a remote over a given protocol. Requires the use of a
 subcommand to specify the protocol, e.g.
 
-` + "```console" + `
+` + "```sh" + `
 rclone serve http remote:
 ` + "```" + `
 
-When the "--metadata" flag is enabled, the following metadata fields will be provided as headers:
-- "content-disposition"
-- "cache-control"
-- "content-language"
-- "content-encoding"
-Note: The availability of these fields depends on whether the remote supports metadata.
+Each subcommand has its own options which you can see in their help.`,
 
-Each subcommand has its own options which you can see in their help.
-`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.39",
 	},
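For context on the paragraph removed above: with `--metadata` the listed fields travel as ordinary HTTP response headers. A sketch (not rclone's serve code) of the mapping it describes:

```go
import "net/http"

// Sketch: surfacing object metadata as HTTP response headers.
func setMetadataHeaders(w http.ResponseWriter, md map[string]string) {
	for _, k := range []string{
		"content-disposition", "cache-control",
		"content-language", "content-encoding",
	} {
		if v, ok := md[k]; ok { // only if the remote supplied it
			w.Header().Set(http.CanonicalHeaderKey(k), v)
		}
	}
}
```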
@@ -151,7 +151,7 @@ It can be configured with .socket and .service unit files as described in
 
 Socket activation can be tested ad-hoc with the ` + "`systemd-socket-activate`" + ` command:
 
-` + "```console" + `
+` + "```sh" + `
 systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/
 ` + "```" + `
 
@@ -157,13 +157,13 @@ Create a new DWORD BasicAuthLevel with value 2.
 
 You can serve the webdav on a unix socket like this:
 
-` + "```console" + `
+` + "```sh" + `
 rclone serve webdav --addr unix:///tmp/my.socket remote:path
 ` + "```" + `
 
 and connect to it like this using rclone and the webdav backend:
 
-` + "```console" + `
+` + "```sh" + `
 rclone --webdav-unix-socket /tmp/my.socket --webdav-url http://localhost lsf :webdav:
 ` + "```" + `
 
@@ -29,19 +29,19 @@ inaccessible.true
 
 You can use it to tier single object
 
-` + "```console" + `
+` + "```sh" + `
 rclone settier Cool remote:path/file
 ` + "```" + `
 
 Or use rclone filters to set tier on only specific files
 
-` + "```console" + `
+` + "```sh" + `
 rclone --include "*.txt" settier Hot remote:path/dir
 ` + "```" + `
 
 Or just provide remote directory and all files in directory will be tiered
 
-` + "```console" + `
+` + "```sh" + `
 rclone settier tier remote:path/dir
 ` + "```",
 	Annotations: map[string]string{
@@ -18,7 +18,7 @@ var Command = &cobra.Command{
 
 Select which test command you want with the subcommand, eg
 
-` + "```console" + `
+` + "```sh" + `
 rclone test memory remote:
 ` + "```" + `
 
@@ -42,7 +42,7 @@ build tags and the type of executable (static or dynamic).
 
 For example:
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone version
 rclone v1.55.0
 - os/version: ubuntu 18.04 (64 bit)
@@ -60,7 +60,7 @@ Note: before rclone version 1.55 the os/type and os/arch lines were merged,
 If you supply the --check flag, then it will do an online check to
 compare your version with the latest release and the latest beta.
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone version --check
 yours: 1.42.0.6
 latest: 1.42 (released 2018-06-16)
@@ -69,7 +69,7 @@ beta: 1.42.0.5 (released 2018-06-17)
 
 Or
 
-` + "```console" + `
+` + "```sh" + `
 $ rclone version --check
 yours: 1.41
 latest: 1.42 (released 2018-06-16)
@@ -32,9 +32,6 @@
       "renderer": {
         "unsafe": false
       }
-    },
-    "highlight": {
-      "style": "monokailight"
     }
   }
 }
@@ -7,7 +7,6 @@ notoc: true
 
 # Rclone syncs your files to cloud storage
 
-<!-- markdownlint-disable-next-line line-length -->
 {{< img width="50%" src="/img/logo_on_light__horizontal_color.svg" alt="rclone logo" style="float:right; padding: 5px;" >}}
 
 - [About rclone](#about)
@@ -16,7 +15,7 @@ notoc: true
 - [What providers does rclone support?](#providers)
 - [Download](/downloads/)
 - [Install](/install/)
-<!-- MAINPAGELINK -->
+{{< rem MAINPAGELINK >}}
 
 ## About rclone {#about}
 
@@ -80,10 +79,8 @@ Rclone helps you:
 - Mirror cloud data to other cloud services or locally
 - Migrate data to the cloud, or between cloud storage vendors
 - Mount multiple, encrypted, cached or diverse cloud storage as a disk
-- Analyse and account for data held on cloud storage using [lsf](/commands/rclone_lsf/),
-  [ljson](/commands/rclone_lsjson/), [size](/commands/rclone_size/), [ncdu](/commands/rclone_ncdu/)
-- [Union](/union/) file systems together to present multiple local and/or cloud
-  file systems as one
+- Analyse and account for data held on cloud storage using [lsf](/commands/rclone_lsf/), [ljson](/commands/rclone_lsjson/), [size](/commands/rclone_size/), [ncdu](/commands/rclone_ncdu/)
+- [Union](/union/) file systems together to present multiple local and/or cloud file systems as one
 
 ## Features {#features}
 
@@ -96,8 +93,7 @@ Rclone helps you:
|
|||||||
- [Copy](/commands/rclone_copy/) new or changed files to cloud storage
|
- [Copy](/commands/rclone_copy/) new or changed files to cloud storage
|
||||||
- [Sync](/commands/rclone_sync/) (one way) to make a directory identical
|
- [Sync](/commands/rclone_sync/) (one way) to make a directory identical
|
||||||
- [Bisync](/bisync/) (two way) to keep two directories in sync bidirectionally
|
- [Bisync](/bisync/) (two way) to keep two directories in sync bidirectionally
|
||||||
- [Move](/commands/rclone_move/) files to cloud storage deleting the local after
|
-- [Move](/commands/rclone_move/) files to cloud storage deleting the local after
-  verification
+- [Move](/commands/rclone_move/) files to cloud storage deleting the local after verification
 - [Check](/commands/rclone_check/) hashes and for missing/extra files
 - [Mount](/commands/rclone_mount/) your cloud storage as a network disk
 - [Serve](/commands/rclone_serve/) local or remote files over [HTTP](/commands/rclone_serve_http/)/[WebDav](/commands/rclone_serve_webdav/)/[FTP](/commands/rclone_serve_ftp/)/[SFTP](/commands/rclone_serve_sftp/)/[DLNA](/commands/rclone_serve_dlna/)
@@ -108,9 +104,6 @@ Rclone helps you:
 (There are many others, built on standard protocols such as
 WebDAV or S3, that work out of the box.)
 
-<!-- markdownlint-capture -->
-<!-- markdownlint-disable line-length no-bare-urls -->
-
 {{< provider_list >}}
 {{< provider name="1Fichier" home="https://1fichier.com/" config="/fichier/" start="true">}}
 {{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}}
@@ -220,15 +213,10 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="The local filesystem" home="/local/" config="/local/" end="true">}}
 {{< /provider_list >}}
 
-<!-- markdownlint-restore -->
-
 ## Virtual providers
 
 These backends adapt or modify other storage providers:
 
-<!-- markdownlint-capture -->
-<!-- markdownlint-disable line-length no-bare-urls -->
-
 {{< provider name="Alias: Rename existing remotes" home="/alias/" config="/alias/" >}}
 {{< provider name="Archive: Read archive files" home="/archive/" config="/archive/" >}}
 {{< provider name="Cache: Cache remotes (DEPRECATED)" home="/cache/" config="/cache/" >}}
@@ -239,8 +227,6 @@ These backends adapt or modify other storage providers:
 {{< provider name="Hasher: Hash files" home="/hasher/" config="/hasher/" >}}
 {{< provider name="Union: Join multiple remotes to work together" home="/union/" config="/union/" >}}
 
-<!-- markdownlint-restore -->
-
 ## Links
 
 - {{< icon "fa fa-home" >}} [Home page](https://rclone.org/)

docs/content/alias.md

@@ -34,7 +34,7 @@ can be used to only show the trashed files in `myDrive`.
 Here is an example of how to make an alias called `remote` for local folder.
 First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -83,28 +83,27 @@ q) Quit config
 e/n/d/r/c/s/q> q
 ```
 
-Once configured you can then use `rclone` like this (replace `remote` with the
-name you gave your remote):
+Once configured you can then use `rclone` like this,
 
 List directories in top level in `/mnt/storage/backup`
 
-```console
+```sh
 rclone lsd remote:
 ```
 
 List all the files in `/mnt/storage/backup`
 
-```console
+```sh
 rclone ls remote:
 ```
 
 Copy another local directory to the alias directory called source
 
-```console
+```sh
 rclone copy /home/source remote:source
 ```
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/alias/alias.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/alias/alias.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to alias (Alias for an existing remote).
@@ -137,4 +136,4 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
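
For reference: the same alias remote walked through interactively above can also be created non-interactively with `rclone config create`. A minimal sketch, assuming the folder and remote name from the example (they are illustrative, not part of this diff):

```sh
# Create an alias remote named "remote" pointing at a local folder,
# then list the top level through the alias.
rclone config create remote alias remote=/mnt/storage/backup
rclone lsd remote:
```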

docs/content/archive.md

@@ -236,8 +236,7 @@ It would be possible to add ISO support fairly easily as the library we use ([go
 
 It would be possible to add write support, but this would only be for creating new archives, not for updating existing archives.
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/archive/archive.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
-
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/archive/archive.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to archive (Read archives).
@@ -284,4 +283,4 @@ Any metadata supported by the underlying remote is read and written.
 
 See the [metadata](/docs/#metadata) docs for more info.
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}

docs/content/authors.md

@@ -12,9 +12,9 @@ description: "Rclone Authors and Contributors"
 
 ## Contributors
 
-<!-- email addresses removed from here need to be added to
+{{< rem `email addresses removed from here need to be added to
 bin/.ignore-emails to make sure update-authors.py doesn't immediately
-put them back in again. -->
+put them back in again.` >}}
 
 - Alex Couper <amcouper@gmail.com>
 - Leonid Shalupov <leonid@shalupov.com> <shalupov@diverse.org.ru>
@@ -1031,19 +1031,3 @@ put them back in again. -->
 - divinity76 <hans@loltek.net>
 - Andrew Gunnerson <accounts+github@chiller3.com>
 - Lakshmi-Surekha <Lakshmi.Kovvuri@ibm.com>
-- dulanting <dulanting@outlook.jp>
-- Adam Dinwoodie <me-and@users.noreply.github.com>
-- Lukas Krejci <metlos@users.noreply.github.com>
-- Riaz Arbi <riazarbi@users.noreply.github.com>
-- Fawzib Rojas <fawzib.rojas@gmail.com>
-- fries1234 <fries1234@protonmail.com>
-- Joseph Brownlee <39440458+JellyJoe198@users.noreply.github.com>
-- Ted Robertson <10043369+tredondo@users.noreply.github.com>
-- SublimePeace <184005903+SublimePeace@users.noreply.github.com>
-- Copilot <198982749+Copilot@users.noreply.github.com>
-- Alex <64072843+A1ex3@users.noreply.github.com>
-- n4n5 <its.just.n4n5@gmail.com>
-- aliaj1 <ali19961@gmail.com>
-- Sean Turner <30396892+seanturner026@users.noreply.github.com>
-- jijamik <30904953+jijamik@users.noreply.github.com>
-- Dominik Sander <git@dsander.de>

docs/content/azureblob.md

@@ -15,7 +15,7 @@ command.) You may put subdirectories in too, e.g.
 Here is an example of making a Microsoft Azure Blob Storage
 configuration. For a remote called `remote`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -57,26 +57,26 @@ y/e/d> y
 
 See all containers
 
-```console
+```sh
 rclone lsd remote:
 ```
 
 Make a new container
 
-```console
+```sh
 rclone mkdir remote:container
 ```
 
 List the contents of a container
 
-```console
+```sh
 rclone ls remote:container
 ```
 
 Sync `/home/local/directory` to the remote container, deleting any excess
 files in the container.
 
-```console
+```sh
 rclone sync --interactive /home/local/directory remote:container
 ```
 
@@ -212,25 +212,25 @@ Credentials created with the `az` tool can be picked up using `env_auth`.
 
 For example if you were to login with a service principal like this:
 
-```console
+```sh
 az login --service-principal -u XXX -p XXX --tenant XXX
 ```
 
 Then you could access rclone resources like this:
 
-```console
+```sh
 rclone lsf :azureblob,env_auth,account=ACCOUNT:CONTAINER
 ```
 
 Or
 
-```console
+```sh
 rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
 ```
 
 Which is analogous to using the `az` tool:
 
-```console
+```sh
 az storage blob list --container-name CONTAINER --account-name ACCOUNT --auth-mode login
 ```
 
@@ -253,14 +253,14 @@ explorer in the Azure portal.
 If you use a container level SAS URL, rclone operations are permitted
 only on a particular container, e.g.
 
-```console
+```sh
 rclone ls azureblob:container
 ```
 
 You can also list the single container from the root. This will only
 show the container specified by the SAS URL.
 
-```console
+```sh
 $ rclone lsd azureblob:
 container/
 ```
@@ -268,7 +268,7 @@ container/
 Note that you can't see or access any other containers - this will
 fail
 
-```console
+```sh
 rclone ls azureblob:othercontainer
 ```
 
@@ -364,11 +364,11 @@ Don't set `env_auth` at the same time.
 If you want to access resources with public anonymous access then set
 `account` only. You can do this without making an rclone config:
 
-```console
+```sh
 rclone lsf :azureblob,account=ACCOUNT:CONTAINER
 ```
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/azureblob/azureblob.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azureblob/azureblob.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to azureblob (Microsoft Azure Blob Storage).
@@ -1040,11 +1040,11 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
 
 ### Custom upload headers
 
 You can set custom upload headers with the `--header-upload` flag.
 
 - Cache-Control
 - Content-Disposition
@@ -1053,21 +1053,19 @@ You can set custom upload headers with the `--header-upload` flag.
 - Content-Type
 - X-MS-Tags
 
-Eg `--header-upload "Content-Type: text/potato"` or
-`--header-upload "X-MS-Tags: foo=bar"`.
+Eg `--header-upload "Content-Type: text/potato"` or `--header-upload "X-MS-Tags: foo=bar"`
 
 ## Limitations
 
 MD5 sums are only uploaded with chunked files if the source has an MD5
 sum. This will always be the case for a local to azure copy.
 
-`rclone about` is not supported by the Microsoft Azure Blob storage backend.
-Backends without this capability cannot determine free space for an rclone
-mount or use policy `mfs` (most free space) as a member of an rclone union
+`rclone about` is not supported by the Microsoft Azure Blob storage backend. Backends without
+this capability cannot determine free space for an rclone mount or
+use policy `mfs` (most free space) as a member of an rclone union
 remote.
 
-See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
-and [rclone about](https://rclone.org/commands/rclone_about/).
+See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
 
 ## Azure Storage Emulator Support
 
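
For reference: a container-level SAS URL like the one discussed in the hunks above can also be supplied inline through a connection-string remote, with no saved config. A minimal sketch; the account, container, and `SAS_TOKEN` below are placeholders, not values from this diff, and the quoting may need adjusting for your shell:

```sh
# List a container using an on-the-fly azureblob remote authenticated by SAS URL.
rclone ls ':azureblob,sas_url="https://ACCOUNT.blob.core.windows.net/container?SAS_TOKEN":container'
```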

docs/content/azurefiles.md

@@ -14,7 +14,7 @@ e.g. `remote:path/to/dir`.
 Here is an example of making a Microsoft Azure Files Storage
 configuration. For a remote called `remote`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -90,26 +90,26 @@ Once configured you can use rclone.
 
 See all files in the top level:
 
-```console
+```sh
 rclone lsf remote:
 ```
 
 Make a new directory in the root:
 
-```console
+```sh
 rclone mkdir remote:dir
 ```
 
 Recursively List the contents:
 
-```console
+```sh
 rclone ls remote:
 ```
 
 Sync `/home/local/directory` to the remote directory, deleting any
 excess files in the directory.
 
-```console
+```sh
 rclone sync --interactive /home/local/directory remote:dir
 ```
 
@@ -238,19 +238,19 @@ Credentials created with the `az` tool can be picked up using `env_auth`.
 
 For example if you were to login with a service principal like this:
 
-```console
+```sh
 az login --service-principal -u XXX -p XXX --tenant XXX
 ```
 
 Then you could access rclone resources like this:
 
-```console
+```sh
 rclone lsf :azurefiles,env_auth,account=ACCOUNT:
 ```
 
 Or
 
-```console
+```sh
 rclone lsf --azurefiles-env-auth --azurefiles-account=ACCOUNT :azurefiles:
 ```
 
@@ -348,7 +348,7 @@ Setting this can be useful if you wish to use the `az` CLI on a host with
 a System Managed Identity that you do not want to use.
 Don't set `env_auth` at the same time.
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/azurefiles/azurefiles.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azurefiles/azurefiles.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to azurefiles (Microsoft Azure Files).
@@ -793,7 +793,7 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
 
 ### Custom upload headers
 

docs/content/b2.md

@@ -15,7 +15,7 @@ command.) You may put subdirectories in too, e.g. `remote:bucket/path/to/dir`.
 
 Here is an example of making a b2 configuration. First run
 
-```console
+```sh
 rclone config
 ```
 
@@ -62,26 +62,27 @@ This remote is called `remote` and can now be used like this
 
 See all buckets
 
-```console
+```sh
 rclone lsd remote:
 ```
 
 Create a new bucket
 
-```console
+```sh
 rclone mkdir remote:bucket
 ```
 
 List the contents of a bucket
 
-```console
+```sh
 rclone ls remote:bucket
 ```
 
+
 Sync `/home/local/directory` to the remote bucket, deleting any
 excess files in the bucket.
 
-```console
+```sh
 rclone sync --interactive /home/local/directory remote:bucket
 ```
 
@@ -97,7 +98,7 @@ Follow Backblaze's docs to create an Application Key with the required
 permission and add the `applicationKeyId` as the `account` and the
 `Application Key` itself as the `key`.
 
-Note that you must put the *applicationKeyId* as the `account` – you
+Note that you must put the _applicationKeyId_ as the `account` – you
 can't use the master Account ID. If you try then B2 will return 401
 errors.
 
@@ -191,8 +192,8 @@ You may opt in to a "hard delete" of files with the `--b2-hard-delete`
 flag which permanently removes files on deletion instead of hiding
 them.
 
 Old versions of files, where available, are visible using the
-`--b2-versions` flag. These can be deleted as required with `delete`.
+`--b2-versions` flag.
 
 It is also possible to view a bucket as it was at a certain point in time,
 using the `--b2-version-at` flag. This will show the file versions as they
@@ -229,7 +230,7 @@ version followed by a `cleanup` of the old versions.
 
 Show current version and all the versions with `--b2-versions` flag.
 
-```console
+```sh
 $ rclone -q ls b2:cleanup-test
         9 one.txt
 
@@ -242,7 +243,7 @@ $ rclone -q --b2-versions ls b2:cleanup-test
 
 Retrieve an old version
 
-```console
+```sh
 $ rclone -q --b2-versions copy b2:cleanup-test/one-v2016-07-04-141003-000.txt /tmp
 
 $ ls -l /tmp/one-v2016-07-04-141003-000.txt
@@ -251,7 +252,7 @@ $ ls -l /tmp/one-v2016-07-04-141003-000.txt
 
 Clean up all the old versions and show that they've gone.
 
-```console
+```sh
 $ rclone -q cleanup b2:cleanup-test
 
 $ rclone -q ls b2:cleanup-test
@@ -267,7 +268,7 @@ When using `--b2-versions` flag rclone is relying on the file name
 to work out whether the objects are versions or not. Versions' names
 are created by inserting timestamp between file name and its extension.
 
-```console
+```sh
         9 file.txt
         8 file-v2023-07-17-161032-000.txt
        16 file-v2023-06-15-141003-000.txt
@@ -321,14 +322,14 @@ rclone will show and act on older versions of files. For example
 
 Listing without `--b2-versions`
 
-```console
+```sh
 $ rclone -q ls b2:cleanup-test
         9 one.txt
 ```
 
 And with
 
-```console
+```sh
 $ rclone -q --b2-versions ls b2:cleanup-test
         9 one.txt
         8 one-v2016-07-04-141032-000.txt
@@ -348,7 +349,7 @@ permitted, so you can't upload files or delete them.
 Rclone supports generating file share links for private B2 buckets.
 They can either be for a file for example:
 
-```console
+```sh
 ./rclone link B2:bucket/path/to/file.txt
 https://f002.backblazeb2.com/file/bucket/path/to/file.txt?Authorization=xxxxxxxx
 
@@ -356,7 +357,7 @@ https://f002.backblazeb2.com/file/bucket/path/to/file.txt?Authorization=xxxxxxxx
 
 or if run on a directory you will get:
 
-```console
+```sh
 ./rclone link B2:bucket/path
 https://f002.backblazeb2.com/file/bucket/path?Authorization=xxxxxxxx
 ```
@@ -371,7 +372,7 @@ https://f002.backblazeb2.com/file/bucket/path/folder/file3?Authorization=xxxxxxx
 
 ```
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/b2/b2.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/b2/b2.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to b2 (Backblaze B2).
@@ -783,7 +784,7 @@ it would do.
 
     rclone backend cleanup-hidden b2:bucket/path/to/dir
 
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
 
 ## Limitations
 
@@ -792,5 +793,6 @@ this capability cannot determine free space for an rclone mount or
 use policy `mfs` (most free space) as a member of an rclone union
 remote.
 
-See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
-and [rclone about](https://rclone.org/commands/rclone_about/).
+See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
+
+
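
For reference: the `--b2-version-at` flag mentioned in the hunks above takes a timestamp. A minimal sketch of viewing the example bucket as it was at a given moment; the date is illustrative, not taken from this diff:

```sh
# List the bucket as it appeared at the given point in time.
rclone ls --b2-version-at 2023-07-01T00:00:00Z b2:cleanup-test
```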

docs/content/bisync.md

@@ -31,7 +31,7 @@ section) before using, or data loss can result. Questions can be asked in the
 
 For example, your first command might look like this:
 
-```console
+```sh
 rclone bisync remote1:path1 remote2:path2 --create-empty-src-dirs --compare size,modtime,checksum --slow-hash-sync-only --resilient -MvP --drive-skip-gdocs --fix-case --resync --dry-run
 ```
 
@@ -40,7 +40,7 @@ After that, remove `--resync` as well.
 
 Here is a typical run log (with timestamps removed for clarity):
 
-```console
+```sh
 rclone bisync /testdir/path1/ /testdir/path2/ --verbose
 INFO  : Synching Path1 "/testdir/path1/" with Path2 "/testdir/path2/"
 INFO  : Path1 checking for diffs
@@ -86,7 +86,7 @@ INFO : Bisync successful
 
 ## Command line syntax
 
-```console
+```sh
 $ rclone bisync --help
 Usage:
   rclone bisync remote1:path1 remote2:path2 [flags]
@@ -169,7 +169,7 @@ be copied to Path1, and the process will then copy the Path1 tree to Path2.
 The `--resync` sequence is roughly equivalent to the following
 (but see [`--resync-mode`](#resync-mode) for other options):
 
-```console
+```sh
 rclone copy Path2 Path1 --ignore-existing [--create-empty-src-dirs]
 rclone copy Path1 Path2 [--create-empty-src-dirs]
 ```
@@ -225,7 +225,7 @@ Shutdown](#graceful-shutdown) mode, when needed) for a very robust
 almost any interruption it might encounter. Consider adding something like the
 following:
 
-```text
+```sh
 --resilient --recover --max-lock 2m --conflict-resolve newer
 ```
 
@@ -353,13 +353,13 @@ simultaneously (or just `modtime` AND `checksum`).
 being `size`, `modtime`, and `checksum`. For example, if you want to compare
 size and checksum, but not modtime, you would do:
 
-```text
+```sh
 --compare size,checksum
 ```
 
 Or if you want to compare all three:
 
-```text
+```sh
 --compare size,modtime,checksum
 ```
 
@@ -627,7 +627,7 @@ specified (or when two identical suffixes are specified.) i.e. with
 `--conflict-loser pathname`, all of the following would produce exactly the
 same result:
 
-```text
+```sh
 --conflict-suffix path
 --conflict-suffix path,path
 --conflict-suffix path1,path2
@@ -642,7 +642,7 @@ changed with the [`--suffix-keep-extension`](/docs/#suffix-keep-extension) flag
 curly braces as globs. This can be helpful to track the date and/or time that
 each conflict was handled by bisync. For example:
 
-```text
+```sh
 --conflict-suffix {DateOnly}-conflict
 // result: myfile.txt.2006-01-02-conflict1
 ```
@@ -667,7 +667,7 @@ conflicts with `..path1` and `..path2` (with two periods, and `path` instead of
 additional dots can be added by including them in the specified suffix string.
 For example, for behavior equivalent to the previous default, use:
 
-```text
+```sh
 [--conflict-resolve none] --conflict-loser pathname --conflict-suffix .path
 ```
 
@@ -707,13 +707,13 @@ For example, a possible sequence could look like this:
 
 1. Normally scheduled bisync run:
 
-```console
+```sh
 rclone bisync Path1 Path2 -MPc --check-access --max-delete 10 --filters-file /path/to/filters.txt -v --no-cleanup --ignore-listing-checksum --disable ListR --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient
 ```
 
 2. Periodic independent integrity check (perhaps scheduled nightly or weekly):
 
-```console
+```sh
 rclone check -MvPc Path1 Path2 --filter-from /path/to/filters.txt
 ```
 
@@ -721,7 +721,7 @@ For example, a possible sequence could look like this:
 If one side is more up-to-date and you want to make the other side match it,
 you could run:
 
-```console
+```sh
 rclone sync Path1 Path2 --filter-from /path/to/filters.txt --create-empty-src-dirs -MPc -v
 ```
 
@@ -851,7 +851,7 @@ override `--backup-dir`.
 
 Example:
 
-```console
+```sh
 rclone bisync /Users/someuser/some/local/path/Bisync gdrive:Bisync --backup-dir1 /Users/someuser/some/local/path/BackupDir --backup-dir2 gdrive:BackupDir --suffix -2023-08-26 --suffix-keep-extension --check-access --max-delete 10 --filters-file /Users/someuser/some/local/path/bisync_filters.txt --no-cleanup --ignore-listing-checksum --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient -MvP --drive-skip-gdocs --fix-case
 ```
 
@@ -1383,7 +1383,7 @@ listings and thus not checked during the check access phase.
 Here are two normal runs. The first one has a newer file on the remote.
 The second has no deltas between local and remote.
 
-```text
+```sh
 2021/05/16 00:24:38 INFO  : Synching Path1 "/path/to/local/tree/" with Path2 "dropbox:/"
 2021/05/16 00:24:38 INFO  : Path1 checking for diffs
 2021/05/16 00:24:38 INFO  : - Path1    File is new    - file.txt
@@ -1433,7 +1433,7 @@ numerous such messages in the log.
 Since there are no final error/warning messages on line *7*, rclone has
 recovered from failure after a retry, and the overall sync was successful.
 
-```text
+```sh
 1: 2021/05/14 00:44:12 INFO  : Synching Path1 "/path/to/local/tree" with Path2 "dropbox:"
 2: 2021/05/14 00:44:12 INFO  : Path1 checking for diffs
 3: 2021/05/14 00:44:12 INFO  : Path2 checking for diffs
@@ -1446,7 +1446,7 @@ recovered from failure after a retry, and the overall sync was successful.
 This log shows a *Critical failure* which requires a `--resync` to recover from.
 See the [Runtime Error Handling](#error-handling) section.
 
-```text
+```sh
 2021/05/12 00:49:40 INFO  : Google drive root '': Waiting for checks to finish
 2021/05/12 00:49:40 INFO  : Google drive root '': Waiting for transfers to finish
 2021/05/12 00:49:40 INFO  : Google drive root '': not deleting files as there were IO errors
@@ -1531,7 +1531,7 @@ on Linux you can use *Cron* which is described below.
 The 1st example runs a sync every 5 minutes between a local directory
 and an OwnCloud server, with output logged to a runlog file:
 
-```text
+```sh
 # Minute (0-59)
 # Hour (0-23)
 # Day of Month (1-31)
@@ -1548,7 +1548,7 @@ If you run `rclone bisync` as a cron job, redirect stdout/stderr to a file.
 The 2nd example runs a sync to Dropbox every hour and logs all stdout (via the `>>`)
 and stderr (via `2>&1`) to a log file.
 
-```text
+```sh
 0 * * * * /path/to/rclone bisync /path/to/local/dropbox Dropbox: --check-access --filters-file /home/user/filters.txt >> /path/to/logs/dropbox-run.log 2>&1
 ```
 
@@ -1630,7 +1630,7 @@ Rerunning the test will let it pass. Consider such failures as noise.
 
 ### Test command syntax
 
-```text
+```sh
 usage: go test ./cmd/bisync [options...]
 
 Options:
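
For reference: the resilience flags recommended in the hunks above combine into a single invocation. A minimal sketch, assuming the placeholder remote names used throughout this document:

```sh
# A bisync run hardened against interruptions, per the recommended flags.
rclone bisync remote1:path1 remote2:path2 \
  --resilient --recover --max-lock 2m --conflict-resolve newer
```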

docs/content/box.md

@@ -18,7 +18,7 @@ to use JWT authentication. `rclone config` walks you through it.
 
 Here is an example of how to make a remote called `remote`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -92,26 +92,23 @@ your browser to the moment you get back the verification code. This
 is on `http://127.0.0.1:53682/` and this may require you to unblock
 it temporarily if you are running a host firewall.
 
-Once configured you can then use `rclone` like this (replace `remote` with the
-name you gave your remote):
+Once configured you can then use `rclone` like this,
 
 List directories in top level of your Box
 
-```console
+```sh
 rclone lsd remote:
 ```
 
 List all the files in your Box
 
-```console
+```sh
 rclone ls remote:
 ```
 
 To copy a local directory to an Box directory called backup
 
-```console
-rclone copy /home/source remote:backup
-```
+    rclone copy /home/source remote:backup
 
 ### Using rclone with an Enterprise account with SSO
 
@@ -147,7 +144,7 @@ did the authentication on.
 
 Here is how to do it.
 
-```console
+```sh
 $ rclone config
 Current remotes:
 
@@ -251,8 +248,8 @@ either be actually deleted from Box or moved to the trash.
 
 Emptying the trash is supported via the rclone however cleanup command
 however this deletes every trashed file and folder individually so it
 may take a very long time.
 Emptying the trash via the WebUI does not have this limitation
 so it is advised to empty the trash via the WebUI.
 
 ### Root folder ID
@@ -277,7 +274,7 @@ So if the folder you want rclone to use has a URL which looks like
 in the browser, then you use `11xxxxxxxxx8` as
 the `root_folder_id` in the config.
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/box/box.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/box/box.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to box (Box).
@@ -509,7 +506,7 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
 
 ## Limitations
 
@@ -522,16 +519,14 @@ Reverse Solidus).
 
 Box only supports filenames up to 255 characters in length.
 
-Box has [API rate limits](https://developer.box.com/guides/api-calls/permissions-and-errors/rate-limits/)
-that sometimes reduce the speed of rclone.
+Box has [API rate limits](https://developer.box.com/guides/api-calls/permissions-and-errors/rate-limits/) that sometimes reduce the speed of rclone.
 
 `rclone about` is not supported by the Box backend. Backends without
 this capability cannot determine free space for an rclone mount or
 use policy `mfs` (most free space) as a member of an rclone union
 remote.
 
-See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
-and [rclone about](https://rclone.org/commands/rclone_about/).
+See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
 
 ## Get your own Box App ID
 
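
For reference: the hunk above walks through reusing a config on a machine that cannot open a browser; the companion step, documented elsewhere by rclone, generates the token on a machine that can. A minimal sketch of that flow (assumed, not shown in this diff):

```sh
# On a machine with a browser: produce a token blob for Box,
# then paste it into the config_token prompt on the headless machine.
rclone authorize "box"
```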

docs/content/cache.md

@@ -31,7 +31,7 @@ with `cache`.
 
 Here is an example of how to make a remote called `test-cache`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -117,19 +117,19 @@ You can then use it like this,
 
 List directories in top level of your drive
 
-```console
+```sh
 rclone lsd test-cache:
 ```
 
 List all the files in your drive
 
-```console
+```sh
 rclone ls test-cache:
 ```
 
 To start a cached mount
 
-```console
+```sh
 rclone mount --allow-other test-cache: /var/tmp/test-cache
 ```
 
@@ -325,7 +325,7 @@ Params:
 - **withData** = true/false to delete cached data (chunks) as
   well *(optional, false by default)*
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/cache/cache.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/cache/cache.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to cache (Cache a remote).
@@ -716,4 +716,4 @@ Print stats on the cache backend in JSON format.
 
     rclone backend stats remote: [options] [<arguments>+]
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}

docs/content/chunker.md

@@ -313,7 +313,7 @@ to keep rclone up-to-date to avoid data corruption.
 
 Changing `transactions` is dangerous and requires explicit migration.
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/chunker/chunker.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/chunker/chunker.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to chunker (Transparently chunk/split large files).
@@ -481,4 +481,4 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}

docs/content/cloudinary.md

@@ -38,7 +38,7 @@ from the developer section.
 
 Now run
 
-```console
+```sh
 rclone config
 ```
 
@@ -113,19 +113,19 @@ y/e/d> y
 
 List directories in the top level of your Media Library
 
-```console
+```sh
 rclone lsd cloudinary-media-library:
 ```
 
 Make a new directory.
 
-```console
+```sh
 rclone mkdir cloudinary-media-library:directory
 ```
 
 List the contents of a directory.
 
-```console
+```sh
 rclone ls cloudinary-media-library:directory
 ```
 
@@ -133,7 +133,7 @@ rclone ls cloudinary-media-library:directory
 
 Cloudinary stores md5 and timestamps for any successful Put automatically and read-only.
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/cloudinary/cloudinary.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/cloudinary/cloudinary.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to cloudinary (Cloudinary).
@@ -254,4 +254,4 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}

docs/content/combine.md

@@ -11,7 +11,7 @@ tree.
 
 For example you might have a remote for images on one provider:
 
-```console
+```sh
 $ rclone tree s3:imagesbucket
 /
 ├── image1.jpg
@@ -20,7 +20,7 @@ $ rclone tree s3:imagesbucket
 
 And a remote for files on another:
 
-```console
+```sh
 $ rclone tree drive:important/files
 /
 ├── file1.txt
@@ -30,7 +30,7 @@ $ rclone tree drive:important/files
 The `combine` backend can join these together into a synthetic
 directory structure like this:
 
-```console
+```sh
 $ rclone tree combined:
 /
 ├── files
@@ -57,7 +57,7 @@ either be a local paths or other remotes.
 Here is an example of how to make a combine called `remote` for the
 example above. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -107,7 +107,7 @@ the shared drives you have access to.
 Assuming your main (non shared drive) Google drive remote is called
 `drive:` you would run
 
-```console
+```sh
 rclone backend -o config drives drive:
 ```
 
@@ -133,7 +133,7 @@ with the `AllDrives:` remote.
 
 See [the Google Drive docs](/drive/#drives) for full info.
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/combine/combine.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/combine/combine.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to combine (Combine several remotes into one).
@@ -183,4 +183,4 @@ Any metadata supported by the underlying remote is read and written.
 
 See the [metadata](/docs/#metadata) docs for more info.
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
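
For reference: the synthetic tree shown in the hunks above corresponds to a combine remote whose `upstreams` option maps directory names to remotes. A minimal sketch, assuming the example remotes (`s3:imagesbucket`, `drive:important/files`) exist:

```sh
# Create the "combined" remote non-interactively, then browse it.
rclone config create combined combine \
  upstreams="files=drive:important/files images=s3:imagesbucket"
rclone tree combined:
```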
@@ -23,7 +23,6 @@ To use this remote, all you need to do is specify another remote and a
|
|||||||
compression mode to use:
|
compression mode to use:
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ rclone config
|
|
||||||
Current remotes:
|
Current remotes:
|
||||||
|
|
||||||
Name Type
|
Name Type
|
||||||
@@ -31,6 +30,7 @@ Name Type
|
|||||||
remote_to_press sometype
|
remote_to_press sometype
|
||||||
|
|
||||||
e) Edit existing remote
|
e) Edit existing remote
|
||||||
|
$ rclone config
|
||||||
n) New remote
|
n) New remote
|
||||||
d) Delete remote
|
d) Delete remote
|
||||||
r) Rename remote
|
r) Rename remote
|
||||||
@@ -39,74 +39,45 @@ s) Set configuration password
|
|||||||
q) Quit config
|
q) Quit config
|
||||||
e/n/d/r/c/s/q> n
|
e/n/d/r/c/s/q> n
|
||||||
name> compress
|
name> compress
|
||||||
|
|
||||||
Option Storage.
|
|
||||||
Type of storage to configure.
|
|
||||||
Choose a number from below, or type in your own value.
|
|
||||||
...
|
...
|
||||||
12 / Compress a remote
|
8 / Compress a remote
|
||||||
\ (compress)
|
\ "compress"
|
||||||
...
|
...
|
||||||
Storage> compress
|
Storage> compress
|
||||||
|
** See help for compress backend at: https://rclone.org/compress/ **
|
||||||
|
|
||||||
Option remote.
|
|
||||||
Remote to compress.
|
Remote to compress.
|
||||||
Enter a value.
|
Enter a string value. Press Enter for the default ("").
|
||||||
remote> remote_to_press:subdir
|
remote> remote_to_press:subdir
|
||||||
|
|
||||||
Option mode.
|
|
||||||
Compression mode.
|
Compression mode.
|
||||||
Choose a number from below, or type in your own value of type string.
|
Enter a string value. Press Enter for the default ("gzip").
|
||||||
Press Enter for the default (gzip).
|
Choose a number from below, or type in your own value
|
||||||
1 / Standard gzip compression with fastest parameters.
|
1 / Gzip compression balanced for speed and compression strength.
|
||||||
\ (gzip)
2 / Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.
\ (zstd)
mode> gzip

Option level.
GZIP (levels -2 to 9):
- -2 — Huffman encoding only. Only use if you know what you're doing.
- -1 (default) — recommended; equivalent to level 5.
- 0 — turns off compression.
- 1–9 — increase compression at the cost of speed. Going past 6 generally offers very little return.

ZSTD (levels 0 to 4):
- 0 — turns off compression entirely.
- 1 — fastest compression with the lowest ratio.
- 2 (default) — good balance of speed and compression.
- 3 — better compression, but uses about 2–3x more CPU than the default.
- 4 — best possible compression ratio (highest CPU cost).

Notes:
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).
Enter a value.
level> -1

Edit advanced config?
\ "gzip"
compression_mode> gzip
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Configuration complete.
Options:
- type: compress
- remote: remote_to_press:subdir
- mode: gzip
- level: -1
Keep this "compress" remote?
Remote config
--------------------
[compress]
type = compress
remote = remote_to_press:subdir
compression_mode = gzip
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

### Compression Algorithms
### Compression Modes

- **GZIP** – a well-established and widely adopted algorithm that strikes a solid balance between compression speed and ratio. It supports compression levels from -2 to 9, with the default -1 (roughly equivalent to level 5) offering an effective middle ground for most scenarios.
- **Zstandard (zstd)** – a modern, high-performance algorithm that offers precise control over the trade-off between speed and compression efficiency. Compression levels range from 0 (no compression) to 4 (maximum compression).
Currently only gzip compression is supported. It provides a decent balance
between speed and size and is well supported by other applications. Compression
strength can further be configured via an advanced setting where 0 is no
compression and 9 is strongest compression.

### File types

@@ -124,7 +95,7 @@ The compressed files will be named `*.###########.gz` where `*` is the base
file and the `#` part is base64 encoded size of the uncompressed file. The file
names should not be changed by anything other than the rclone compression backend.

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/compress/compress.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/compress/compress.go then run make backenddocs" >}}
### Standard options

Here are the Standard options specific to compress (Compress a remote).
@@ -153,38 +124,29 @@ Properties:
- Examples:
- "gzip"
- Standard gzip compression with fastest parameters.
- "zstd"
- Zstandard compression — fast modern algorithm offering adjustable speed-to-compression tradeoffs.
### Advanced options

Here are the Advanced options specific to compress (Compress a remote).

#### --compress-level

GZIP (levels -2 to 9):
- -2 — Huffman encoding only. Only use if you know what you're doing.
- -1 (default) — recommended; equivalent to level 5.
- 0 — turns off compression.
- 1–9 — increase compression at the cost of speed. Going past 6 generally offers very little return.

ZSTD (levels 0 to 4):
- 0 — turns off compression entirely.
- 1 — fastest compression with the lowest ratio.
- 2 (default) — good balance of speed and compression.
- 3 — better compression, but uses about 2–3x more CPU than the default.
- 4 — best possible compression ratio (highest CPU cost).

Notes:
- Choose GZIP for wide compatibility; ZSTD for better speed/ratio tradeoffs.
- Negative gzip levels: -2 = Huffman-only, -1 = default (≈ level 5).
GZIP compression level (-2 to 9).
Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffman encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.

Properties:

- Config: level
- Env Var: RCLONE_COMPRESS_LEVEL
- Type: string
- Type: int
- Required: true
- Default: -1

### Advanced options

Here are the Advanced options specific to compress (Compress a remote).

#### --compress-ram-cache-limit

@@ -219,4 +181,4 @@ Any metadata supported by the underlying remote is read and written.

See the [metadata](/docs/#metadata) docs for more info.

<!-- autogenerated options stop -->
{{< rem autogenerated options stop >}}
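For reference, the same remote can also be created non-interactively. This is a minimal sketch using the option names from the transcript above (`remote`, `mode`, `level`); the remote name `mycompress` is just an illustration, and `mode=zstd` is only accepted by rclone versions that include zstd support:

```console
rclone config create mycompress compress remote=remote_to_press:subdir mode=zstd level=2
```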
@@ -274,7 +274,7 @@ details, and a tool you can use to check if you are affected.
Create the following file structure using "standard" file name
encryption.

```text
```sh
plaintext/
├── file0.txt
├── file1.txt
@@ -287,7 +287,7 @@ plaintext/

Copy these to the remote, and list them

```console
```sh
$ rclone -q copy plaintext secret:
$ rclone -q ls secret:
7 file1.txt
@@ -299,7 +299,7 @@ $ rclone -q ls secret:

The crypt remote looks like

```console
```sh
$ rclone -q ls remote:path
55 hagjclgavj2mbiqm6u6cnjjqcg
54 v05749mltvv1tf4onltun46gls
@@ -310,7 +310,7 @@ $ rclone -q ls remote:path

The directory structure is preserved

```console
```sh
$ rclone -q ls secret:subdir
8 file2.txt
9 file3.txt
@@ -321,7 +321,7 @@ Without file name encryption `.bin` extensions are added to underlying
names. This prevents the cloud provider attempting to interpret file
content.

```console
```sh
$ rclone -q ls remote:path
54 file0.txt.bin
57 subdir/file3.txt.bin
@@ -418,7 +418,7 @@ Use the `rclone cryptcheck` command to check the
integrity of an encrypted remote instead of `rclone check` which can't
check the checksums properly.
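Against the `plaintext` tree used in the examples above, such a check would look something like this (a sketch, assuming the `secret:` remote configured earlier):

```console
rclone cryptcheck plaintext secret:
```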

<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/crypt/crypt.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/crypt/crypt.go then run make backenddocs" >}}
### Standard options

Here are the Standard options specific to crypt (Encrypt/Decrypt a remote).
@@ -697,7 +697,7 @@ Usage Example:
rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...]


<!-- autogenerated options stop -->
{{< rem autogenerated options stop >}}

## Backing up an encrypted remote

@@ -707,9 +707,9 @@ the same in the new encrypted remote.

This will have the following advantages

- `rclone sync` will check the checksums while copying
* `rclone sync` will check the checksums while copying
- you can use `rclone check` between the encrypted remotes
* you can use `rclone check` between the encrypted remotes
- you don't decrypt and encrypt unnecessarily
* you don't decrypt and encrypt unnecessarily

For example, let's say you have your original remote at `remote:` with
the encrypted version at `eremote:` with path `remote:crypt`. You
@@ -719,15 +719,11 @@ as `eremote:`.

To sync the two remotes you would do

```console
rclone sync --interactive remote:crypt remote2:crypt
```
rclone sync --interactive remote:crypt remote2:crypt

And to check the integrity you would do

```console
rclone check remote:crypt remote2:crypt
```
rclone check remote:crypt remote2:crypt

## File formats

@@ -738,8 +734,8 @@ has a header and is divided into chunks.

#### Header

- 8 bytes magic string `RCLONE\x00\x00`
* 8 bytes magic string `RCLONE\x00\x00`
- 24 bytes Nonce (IV)
* 24 bytes Nonce (IV)

The initial nonce is generated from the operating systems crypto
strong random number generator. The nonce is incremented for each
@@ -757,8 +753,8 @@ authenticate messages.

Each chunk contains:

- 16 Bytes of Poly1305 authenticator
* 16 Bytes of Poly1305 authenticator
- 1 - 65536 bytes XSalsa20 encrypted data
* 1 - 65536 bytes XSalsa20 encrypted data

64k chunk size was chosen as the best performing chunk size (the
authenticator takes too much time below this and the performance drops
@@ -771,15 +767,15 @@ This uses a 32 byte (256 bit key) key derived from the user password.

1 byte file will encrypt to

- 32 bytes header
* 32 bytes header
- 17 bytes data chunk
* 17 bytes data chunk

49 bytes total

1 MiB (1048576 bytes) file will encrypt to

- 32 bytes header
* 32 bytes header
- 16 chunks of 65568 bytes
* 16 chunks of 65568 bytes

1049120 bytes total (a 0.05% overhead). This is the overhead for big
files.
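Spelled out, the arithmetic behind those two totals (using the header and chunk sizes listed above):

```text
1 byte file:  32 byte header + one (16 + 1) byte chunk = 49 bytes
1 MiB file:   32 byte header + 16 chunks x 65568 bytes = 32 + 1049088 = 1049120 bytes
overhead:     (1049120 - 1048576) / 1048576 ≈ 0.05%
```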
@@ -802,8 +798,8 @@ it on the cloud storage system.

This means that

- filenames with the same name will encrypt the same
* filenames with the same name will encrypt the same
- filenames which start the same won't have a common prefix
* filenames which start the same won't have a common prefix

This uses a 32 byte key (256 bits) and a 16 byte (128 bits) IV both of
which are derived from the user password.
@@ -812,8 +808,8 @@ After encryption they are written out using a modified version of
standard `base32` encoding as described in RFC4648. The standard
encoding is modified in two ways:

- it becomes lower case (no-one likes upper case filenames!)
* it becomes lower case (no-one likes upper case filenames!)
- we strip the padding character `=`
* we strip the padding character `=`

`base32` is used rather than the more efficient `base64` so rclone can be
used on case insensitive remotes (e.g. Windows, Box, Dropbox, Onedrive etc).
@@ -829,7 +825,6 @@ then rclone uses an internal one.
encrypted data. For full protection against this you should always use
a salt.

## See Also
## SEE ALSO

- [rclone cryptdecode](/commands/rclone_cryptdecode/) - Show forward/reverse
mapping of encrypted filenames.
* [rclone cryptdecode](/commands/rclone_cryptdecode/) - Show forward/reverse mapping of encrypted filenames
@@ -45,27 +45,27 @@ on the host.
The *FUSE* driver is a prerequisite for rclone mounting and should be
installed on host:

```console
```sh
sudo apt-get -y install fuse3
```

Create two directories required by rclone docker plugin:

```console
```sh
sudo mkdir -p /var/lib/docker-plugins/rclone/config
sudo mkdir -p /var/lib/docker-plugins/rclone/cache
```

Install the managed rclone docker plugin for your architecture (here `amd64`):

```console
```sh
docker plugin install rclone/docker-volume-rclone:amd64 args="-v" --alias rclone --grant-all-permissions
docker plugin list
```

Create your [SFTP volume](/sftp/#standard-options):

```console
```sh
docker volume create firstvolume -d rclone -o type=sftp -o sftp-host=_hostname_ -o sftp-user=_username_ -o sftp-pass=_password_ -o allow-other=true
```

@@ -78,7 +78,7 @@ for example `-o path=/home/username`.

Time to create a test container and mount the volume into it:

```console
```sh
docker run --rm -it -v firstvolume:/mnt --workdir /mnt ubuntu:latest bash
```

@@ -88,7 +88,7 @@ or otherwise play with it. Type `exit` when you are done.
The container will stop but the volume will stay, ready to be reused.
When it's not needed anymore, remove it:

```console
```sh
docker volume list
docker volume remove firstvolume
```
@@ -126,7 +126,7 @@ token = {"access_token":...}
Now create the file named `example.yml` with a swarm stack description
like this:

```yaml
```yml
version: '3'
services:
heimdall:
@@ -145,7 +145,7 @@ volumes:

and run the stack:

```console
```sh
docker stack deploy example -c ./example.yml
```

@@ -155,7 +155,7 @@ run service containers on one or more cluster nodes and request
the `example_configdata` volume from rclone plugins on the node hosts.
You can use the following commands to confirm results:

```console
```sh
docker service ls
docker service ps example_heimdall
docker volume ls
@@ -173,7 +173,7 @@ the `docker volume remove example_configdata` command on every node.
Volumes can be created with [docker volume create](https://docs.docker.com/engine/reference/commandline/volume_create/).
Here are a few examples:

```console
```sh
docker volume create vol1 -d rclone -o remote=storj: -o vfs-cache-mode=full
docker volume create vol2 -d rclone -o remote=:storj,access_grant=xxx:heimdall
docker volume create vol3 -d rclone -o type=storj -o path=heimdall -o storj-access-grant=xxx -o poll-interval=0
@@ -186,7 +186,7 @@ option.

Volumes can be inspected as follows:

```console
```sh
docker volume list
docker volume inspect vol1
```
@@ -210,13 +210,13 @@ The `remote=:backend:dir/subdir` syntax can be used to create
while the `type` and `path` options provide a simpler alternative for this.
Using two split options

```text
```sh
-o type=backend -o path=dir/subdir
```

is equivalent to the combined syntax

```text
```sh
-o remote=:backend:dir/subdir
```

@@ -262,13 +262,13 @@ Inside connection string the backend prefix must be dropped from parameter
names but in the `-o param=value` array it must be present.
For instance, compare the following option array

```text
```sh
-o remote=:sftp:/home -o sftp-host=localhost
```

with equivalent connection string:

```text
```sh
-o remote=:sftp,host=localhost:/home
```

@@ -295,7 +295,7 @@ Each of them should be named after its volume and have at least two
elements, the self-explanatory `driver: rclone` value and the
`driver_opts:` structure playing the same role as `-o key=val` CLI flags:

```yaml
```yml
volumes:
volume_name_1:
driver: rclone
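For illustration, a complete minimal `volumes:` section in this style might look as follows. Everything here is a placeholder sketch: the volume name, host and credentials are invented, and the option keys simply mirror the `-o` flags shown earlier (compose option names may be written with underscores):

```yaml
volumes:
  sftpdata:
    driver: rclone
    driver_opts:
      type: 'sftp'
      path: 'home/user'
      sftp_host: 'example.com'
      sftp_user: 'user'
      sftp_pass: 'verysecret'
      allow_other: 'true'
```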
@@ -345,7 +345,7 @@ By default they must exist on host at the following locations
You can [install managed plugin](https://docs.docker.com/engine/reference/commandline/plugin_install/)
with default settings as follows:

```console
```sh
docker plugin install rclone/docker-volume-rclone:amd64 --grant-all-permissions --alias rclone
```

@@ -388,7 +388,7 @@ mount namespaces and bind-mounts into requesting user containers.
You can tweak a few plugin settings after installation when it's disabled
(not in use), for instance:

```console
```sh
docker plugin disable rclone
docker plugin set rclone RCLONE_VERBOSE=2 config=/etc/rclone args="--vfs-cache-mode=writes --allow-other"
docker plugin enable rclone
@@ -448,7 +448,7 @@ actual level assigned by rclone in the encapsulated message string.

You can set custom plugin options right when you install it, *in one go*:

```console
```sh
docker plugin remove rclone
docker plugin install rclone/docker-volume-rclone:amd64 \
--alias rclone --grant-all-permissions \
@@ -463,7 +463,7 @@ to inform the docker daemon that a volume is (un-)available.
As a workaround you can setup a healthcheck to verify that the mount
is responding, for example:

```yaml
```yml
services:
my_service:
image: my_image
@@ -486,7 +486,7 @@ You can just run it (type `rclone serve docker` and hit enter) for the test.

Install *FUSE*:

```console
```sh
sudo apt-get -y install fuse
```

@@ -496,7 +496,7 @@ and [docker-volume-rclone.socket](https://raw.githubusercontent.com/rclone/rclon

Put them to the `/etc/systemd/system/` directory:

```console
```sh
cp docker-volume-plugin.service /etc/systemd/system/
cp docker-volume-plugin.socket /etc/systemd/system/
```
@@ -505,7 +505,7 @@ Please note that all commands in this section must be run as *root* but
we omit `sudo` prefix for brevity.
Now create directories required by the service:

```console
```sh
mkdir -p /var/lib/docker-volumes/rclone
mkdir -p /var/lib/docker-plugins/rclone/config
mkdir -p /var/lib/docker-plugins/rclone/cache
@@ -513,7 +513,7 @@ mkdir -p /var/lib/docker-plugins/rclone/cache

Run the docker plugin service in the socket activated mode:

```console
```sh
systemctl daemon-reload
systemctl start docker-volume-rclone.service
systemctl enable docker-volume-rclone.socket
@@ -540,7 +540,7 @@ prefer socket activation.
You can [see managed plugin settings](https://docs.docker.com/engine/extend/#debugging-plugins)
with

```console
```sh
docker plugin list
docker plugin inspect rclone
```
@@ -555,20 +555,20 @@ but their actual level can be seen from encapsulated message string.
You will usually install the latest version of managed plugin for your platform.
Use the following commands to print the actual installed version:

```console
```sh
PLUGID=$(docker plugin list --no-trunc | awk '/rclone/{print$1}')
sudo runc --root /run/docker/runtime-runc/plugins.moby exec $PLUGID rclone version
```

You can even use `runc` to run shell inside the plugin container:

```console
```sh
sudo runc --root /run/docker/runtime-runc/plugins.moby exec --tty $PLUGID bash
```

Also you can use curl to check the plugin socket connectivity:

```console
```sh
docker plugin list --no-trunc
PLUGID=123abc...
sudo curl -H Content-Type:application/json -XPOST -d {} --unix-socket /run/docker/plugins/$PLUGID/rclone.sock http://localhost/Plugin.Activate
@@ -582,7 +582,7 @@ diagnosing with the above methods, you can try clearing the state of the plugin.
This might be needed because a reinstall don't cleanup existing state files to
allow for easy restoration, as stated above.

```console
```sh
docker plugin disable rclone # disable the plugin to ensure no interference
sudo rm /var/lib/docker-plugins/rclone/cache/docker-plugin.state # removing the plugin state
docker plugin enable rclone # re-enable the plugin afterward
@@ -598,14 +598,14 @@ it won't even return an error. I hope that docker maintainers will fix
this some day. In the meantime be aware that you must remove your volume
before recreating it with new settings:

```console
```sh
docker volume remove my_vol
docker volume create my_vol -d rclone -o opt1=new_val1 ...
```

and verify that settings did update:

```console
```sh
docker volume list
docker volume inspect my_vol
```
@@ -22,7 +22,7 @@ file and choose its location.)
The easiest way to make the config is to run rclone with the config
option:

```console
```sh
rclone config
```

@@ -100,7 +100,7 @@ Rclone syncs a directory tree from one storage system to another.

Its syntax is like this

```console
```sh
rclone subcommand [options] <parameters> <parameters...>
```

@@ -115,7 +115,7 @@ used before the `subcommand`. Anything after a `--` option will not be
interpreted as an option so if you need to add a parameter which
starts with a `-` then put a `--` on its own first, eg

```console
```sh
rclone lsf -- -directory-starting-with-dash
```

@@ -136,7 +136,7 @@ learning rclone to avoid accidental data loss.

rclone uses a system of subcommands. For example

```console
```sh
rclone ls remote:path # lists a remote
rclone copy /local/path remote:path # copies /local/path to the remote
rclone sync --interactive /local/path remote:path # syncs /local/path to the remote
@@ -192,7 +192,7 @@ directory` if it isn't.
For example, suppose you have a remote with a file in called
`test.jpg`, then you could copy just that file like this

```console
```sh
rclone copy remote:test.jpg /tmp/download
```

@@ -200,13 +200,13 @@ The file `test.jpg` will be placed inside `/tmp/download`.

This is equivalent to specifying

```console
```sh
rclone copy --files-from /tmp/files remote: /tmp/download
```

Where `/tmp/files` contains the single line

```console
```sh
test.jpg
```

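If the file should end up under a different name, `rclone copyto` addresses a single file directly; a minimal sketch (the destination filename is just an illustration):

```console
rclone copyto remote:test.jpg /tmp/download/renamed.jpg
```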
@@ -252,25 +252,25 @@ the command line (or in environment variables).

Here are some examples:

```console
```sh
rclone lsd --http-url https://pub.rclone.org :http:
```

To list all the directories in the root of `https://pub.rclone.org/`.

```console
```sh
rclone lsf --http-url https://example.com :http:path/to/dir
```

To list files and directories in `https://example.com/path/to/dir/`

```console
```sh
rclone copy --http-url https://example.com :http:path/to/dir /tmp/dir
```

To copy files and directories in `https://example.com/path/to/dir` to `/tmp/dir`.

```console
```sh
rclone copy --sftp-host example.com :sftp:path/to/dir /tmp/dir
```

@@ -284,7 +284,7 @@ syntax, so instead of providing the arguments as command line
parameters `--http-url https://pub.rclone.org` they are provided as
part of the remote specification as a kind of connection string.

```console
```sh
rclone lsd ":http,url='https://pub.rclone.org':"
rclone lsf ":http,url='https://example.com':path/to/dir"
rclone copy ":http,url='https://example.com':path/to/dir" /tmp/dir
@@ -295,7 +295,7 @@ These can apply to modify existing remotes as well as create new
remotes with the on the fly syntax. This example is equivalent to
adding the `--drive-shared-with-me` parameter to the remote `gdrive:`.

```console
```sh
rclone lsf "gdrive,shared_with_me:path/to/dir"
```

@@ -306,13 +306,13 @@ file shared on google drive to the normal drive which **does not
work** because the `--drive-shared-with-me` flag applies to both the
source and the destination.

```console
```sh
rclone copy --drive-shared-with-me gdrive:shared-file.txt gdrive:
```

However using the connection string syntax, this does work.

```console
```sh
rclone copy "gdrive,shared_with_me:shared-file.txt" gdrive:
```

@@ -321,13 +321,13 @@ backend. If for example gdriveCrypt is a crypt based on gdrive, then the
following command **will not work** as intended, because
`shared_with_me` is ignored by the crypt backend:

```console
```sh
rclone copy "gdriveCrypt,shared_with_me:shared-file.txt" gdriveCrypt:
```

The connection strings have the following syntax

```text
```sh
remote,parameter=value,parameter2=value2:path/to/dir
:backend,parameter=value,parameter2=value2:path/to/dir
```
@@ -335,7 +335,7 @@ remote,parameter=value,parameter2=value2:path/to/dir
If the `parameter` has a `:` or `,` then it must be placed in quotes `"` or
`'`, so

```text
```sh
remote,parameter="colon:value",parameter2="comma,value":path/to/dir
:backend,parameter='colon:value',parameter2='comma,value':path/to/dir
```
@@ -343,7 +343,7 @@ remote,parameter="colon:value",parameter2="comma,value":path/to/dir
If a quoted value needs to include that quote, then it should be
doubled, so

```text
```sh
remote,parameter="with""quote",parameter2='with''quote':path/to/dir
```

@@ -354,13 +354,13 @@ If you leave off the `=parameter` then rclone will substitute `=true`
which works very well with flags. For example, to use s3 configured in
the environment you could use:

```console
```sh
rclone lsd :s3,env_auth:
```

Which is equivalent to

```console
```sh
rclone lsd :s3,env_auth=true:
```

@@ -372,7 +372,7 @@ If you are a shell master then you'll know which strings are OK and
which aren't, but if you aren't sure then enclose them in `"` and use
`'` as the inside quote. This syntax works on all OSes.

```console
```sh
rclone copy ":http,url='https://example.com':path/to/dir" /tmp/dir
```

@@ -381,7 +381,7 @@ strings in the shell (notably `\` and `$` and `"`) so if your strings
contain those you can swap the roles of `"` and `'` thus. (This syntax
does not work on Windows.)

```console
```sh
rclone copy ':http,url="https://example.com":path/to/dir' /tmp/dir
```

@@ -394,13 +394,13 @@ If you supply extra configuration to a backend by command line flag,
environment variable or connection string then rclone will add a
suffix based on the hash of the config to the name of the remote, eg

```console
```sh
rclone -vv lsf --s3-chunk-size 20M s3:
```

Has the log message

```text
```sh
DEBUG : s3: detected overridden config - adding "{Srj1p}" suffix to name
```

@@ -411,13 +411,13 @@ This should only be noticeable in the logs.

This means that on the fly backends such as

```console
```sh
rclone -vv lsf :s3,env_auth:
```

Will get their own names

```text
```sh
DEBUG : :s3: detected overridden config - adding "{YTu53}" suffix to name
```

@@ -551,13 +551,13 @@ Here are some gotchas which may help users unfamiliar with the shell rules
If your names have spaces or shell metacharacters (e.g. `*`, `?`, `$`,
`'`, `"`, etc.) then you must quote them. Use single quotes `'` by default.

```console
```sh
rclone copy 'Important files?' remote:backup
```

If you want to send a `'` you will need to use `"`, e.g.

```console
```sh
rclone copy "O'Reilly Reviews" remote:backup
```

@@ -590,13 +590,13 @@ file or directory like this then use the full path starting with a

So to sync a directory called `sync:me` to a remote called `remote:` use

```console
```sh
rclone sync --interactive ./sync:me remote:path
```

or

```console
```sh
rclone sync --interactive /full/path/to/sync:me remote:path
```

@@ -611,7 +611,7 @@ to copy them in place.

Eg

```console
```sh
rclone copy s3:oldbucket s3:newbucket
```

@@ -632,7 +632,7 @@ same.

This can be used when scripting to make aged backups efficiently, e.g.

```console
```sh
rclone sync --interactive remote:current-backup remote:previous-backup
rclone sync --interactive /path/to/files remote:current-backup
```
@@ -872,7 +872,7 @@ excluded by a filter rule.

For example

```console
```sh
rclone sync --interactive /path/to/local remote:current --backup-dir remote:old
```

@@ -902,7 +902,7 @@ You can use `--bind 0.0.0.0` to force rclone to use IPv4 addresses and

This option controls the bandwidth limit. For example

```text
```sh
--bwlimit 10M
```

@@ -914,7 +914,7 @@ suffix B|K|M|G|T|P. The default is `0` which means to not limit bandwidth.
The upload and download bandwidth can be specified separately, as
`--bwlimit UP:DOWN`, so

```text
```sh
--bwlimit 10M:100k
```

@@ -922,7 +922,7 @@ would mean limit the upload bandwidth to 10 MiB/s and the download
bandwidth to 100 KiB/s. Either limit can be "off" meaning no limit, so
to just limit the upload bandwidth you would use

```text
```sh
--bwlimit 10M:off
```

@@ -979,13 +979,13 @@ be unlimited.
Timeslots without `WEEKDAY` are extended to the whole week. So this
example:

```text
```sh
--bwlimit "Mon-00:00,512 12:00,1M Sun-20:00,off"
```

Is equivalent to this:

```text
```sh
--bwlimit "Mon-00:00,512Mon-12:00,1M Tue-12:00,1M Wed-12:00,1M Thu-12:00,1M Fri-12:00,1M Sat-12:00,1M Sun-12:00,1M Sun-20:00,off"
```

@@ -1005,14 +1005,14 @@ of a long running rclone transfer and to restore it back to the value specified
with `--bwlimit` quickly when needed. Assuming there is only one rclone instance
running, you can toggle the limiter like this:

```console
```sh
kill -SIGUSR2 $(pidof rclone)
```

If you configure rclone with a [remote control](/rc) then you can use
change the bwlimit dynamically:

```console
```sh
rclone rc core/bwlimit rate=1M
```

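To read the current limit back, or to lift it entirely, the same rc command can be reused; a sketch:

```console
rclone rc core/bwlimit          # query the current limit
rclone rc core/bwlimit rate=off # remove the limit
```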
@@ -1023,7 +1023,7 @@ This option controls per file bandwidth limit. For the options see the
|
|||||||
|
|
||||||
For example use this to allow no transfers to be faster than 1 MiB/s
|
For example use this to allow no transfers to be faster than 1 MiB/s
|
||||||
|
|
||||||
```text
|
```sh
|
||||||
--bwlimit-file 1M
|
--bwlimit-file 1M
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1313,7 +1313,7 @@ time rclone started up.
|
|||||||
This disables a comma separated list of optional features. For example
|
This disables a comma separated list of optional features. For example
|
||||||
to disable server-side move and server-side copy use:
|
to disable server-side move and server-side copy use:
|
||||||
|
|
||||||
```text
|
```sh
|
||||||
--disable move,copy
|
--disable move,copy
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1321,13 +1321,13 @@ The features can be put in any case.
|
|||||||
|
|
||||||
To see a list of which features can be disabled use:
|
To see a list of which features can be disabled use:
|
||||||
|
|
||||||
```text
|
```sh
|
||||||
--disable help
|
--disable help
|
||||||
```
|
```
|
||||||
|
|
||||||
The features a remote has can be seen in JSON format with:
|
The features a remote has can be seen in JSON format with:
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
rclone backend features remote:
|
rclone backend features remote:
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1367,7 +1367,7 @@ support ([RFC 8622](https://tools.ietf.org/html/rfc8622)).
|
|||||||
|
|
||||||
For example, if you configured QoS on router to handle LE properly. Running:
|
For example, if you configured QoS on router to handle LE properly. Running:
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
rclone copy --dscp LE from:/from to:/to
|
rclone copy --dscp LE from:/from to:/to
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1459,7 +1459,7 @@ This flag is supported for all HTTP based backends even those not
|
|||||||
supported by `--header-upload` and `--header-download` so may be used
|
supported by `--header-upload` and `--header-download` so may be used
|
||||||
as a workaround for those with care.
|
as a workaround for those with care.
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
rclone ls remote:test --header "X-Rclone: Foo" --header "X-LetMeIn: Yes"
|
rclone ls remote:test --header "X-Rclone: Foo" --header "X-LetMeIn: Yes"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1468,7 +1468,7 @@ rclone ls remote:test --header "X-Rclone: Foo" --header "X-LetMeIn: Yes"
|
|||||||
Add an HTTP header for all download transactions. The flag can be repeated to
|
Add an HTTP header for all download transactions. The flag can be repeated to
|
||||||
add multiple headers.
|
add multiple headers.
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
rclone sync --interactive s3:test/src ~/dst --header-download "X-Amz-Meta-Test: Foo" --header-download "X-Amz-Meta-Test2: Bar"
|
rclone sync --interactive s3:test/src ~/dst --header-download "X-Amz-Meta-Test: Foo" --header-download "X-Amz-Meta-Test2: Bar"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1480,7 +1480,7 @@ currently supported backends.
|
|||||||
Add an HTTP header for all upload transactions. The flag can be repeated to add
|
Add an HTTP header for all upload transactions. The flag can be repeated to add
|
||||||
multiple headers.
|
multiple headers.
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
rclone sync --interactive ~/src s3:test/dst --header-upload "Content-Disposition: attachment; filename='cool.html'" --header-upload "X-Amz-Meta-Test: FooBar"
|
rclone sync --interactive ~/src s3:test/dst --header-upload "Content-Disposition: attachment; filename='cool.html'" --header-upload "X-Amz-Meta-Test: FooBar"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1658,7 +1658,7 @@ especially with `rclone sync`.
|
|||||||
|
|
||||||
For example
|
For example
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
$ rclone delete --interactive /tmp/dir
|
$ rclone delete --interactive /tmp/dir
|
||||||
rclone: delete "important-file.txt"?
|
rclone: delete "important-file.txt"?
|
||||||
y) Yes, this is OK (default)
|
y) Yes, this is OK (default)
|
||||||
@@ -1748,7 +1748,7 @@ ignored.
|
|||||||
|
|
||||||
For example if the following flags are in use
|
For example if the following flags are in use
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
rclone --log-file rclone.log --log-file-max-size 1M --log-file-max-backups 3
|
rclone --log-file rclone.log --log-file-max-size 1M --log-file-max-backups 3
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -1843,7 +1843,7 @@ once as administrator to create the registry key in advance.
|
|||||||
severe) than or equal to the `--log-level`. For example to log DEBUG
|
severe) than or equal to the `--log-level`. For example to log DEBUG
|
||||||
to a log file but ERRORs to the event log you would use
|
to a log file but ERRORs to the event log you would use
|
||||||
|
|
||||||
```text
|
```sh
|
||||||
--log-file rclone.log --log-level DEBUG --windows-event-log ERROR
|
--log-file rclone.log --log-level DEBUG --windows-event-log ERROR
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -2074,7 +2074,7 @@ it in `"`, if you want a literal `"` in an argument then enclose the
|
|||||||
argument in `"` and double the `"`. See [CSV encoding](https://godoc.org/encoding/csv)
|
argument in `"` and double the `"`. See [CSV encoding](https://godoc.org/encoding/csv)
|
||||||
for more info.
|
for more info.
|
||||||
|
|
||||||
```text
|
```sh
|
||||||
--metadata-mapper "python bin/test_metadata_mapper.py"
|
--metadata-mapper "python bin/test_metadata_mapper.py"
|
||||||
--metadata-mapper 'python bin/test_metadata_mapper.py "argument with a space"'
|
--metadata-mapper 'python bin/test_metadata_mapper.py "argument with a space"'
|
||||||
--metadata-mapper 'python bin/test_metadata_mapper.py "argument with ""two"" quotes"'
|
--metadata-mapper 'python bin/test_metadata_mapper.py "argument with ""two"" quotes"'
|
||||||
@@ -2445,7 +2445,7 @@ for more info.
|
|||||||
|
|
||||||
Eg
|
Eg
|
||||||
|
|
||||||
```text
|
```sh
|
||||||
--password-command "echo hello"
|
--password-command "echo hello"
|
||||||
--password-command 'echo "hello with space"'
|
--password-command 'echo "hello with space"'
|
||||||
--password-command 'echo "hello with ""quotes"" and space"'
|
--password-command 'echo "hello with ""quotes"" and space"'
|
||||||
@@ -2650,7 +2650,7 @@ or with `--backup-dir`. See `--backup-dir` for more info.
|
|||||||
|
|
||||||
For example
|
For example
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
rclone copy --interactive /path/to/local/file remote:current --suffix .bak
|
rclone copy --interactive /path/to/local/file remote:current --suffix .bak
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -2661,7 +2661,7 @@ If using `rclone sync` with `--suffix` and without `--backup-dir` then
|
|||||||
it is recommended to put a filter rule in excluding the suffix
|
it is recommended to put a filter rule in excluding the suffix
|
||||||
otherwise the `sync` will delete the backup files.
|
otherwise the `sync` will delete the backup files.
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
rclone sync --interactive /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
|
rclone sync --interactive /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -3044,7 +3044,7 @@ have to supply the password every time you start rclone.
|
|||||||
|
|
||||||
To add a password to your rclone configuration, execute `rclone config`.
|
To add a password to your rclone configuration, execute `rclone config`.
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
$ rclone config
|
$ rclone config
|
||||||
Current remotes:
|
Current remotes:
|
||||||
|
|
||||||
@@ -3058,7 +3058,7 @@ e/n/d/s/q>
|
|||||||
|
|
||||||
Go into `s`, Set configuration password:
|
Go into `s`, Set configuration password:
|
||||||
|
|
||||||
```text
|
```sh
|
||||||
e/n/d/s/q> s
|
e/n/d/s/q> s
|
||||||
Your configuration is not encrypted.
|
Your configuration is not encrypted.
|
||||||
If you add a password, you will protect your login information to cloud services.
|
If you add a password, you will protect your login information to cloud services.
|
||||||
@@ -3131,7 +3131,7 @@ environment variables. The script is supplied either via
|
|||||||
One useful example of this is using the `passwordstore` application
|
One useful example of this is using the `passwordstore` application
|
||||||
to retrieve the password:
|
to retrieve the password:
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
export RCLONE_PASSWORD_COMMAND="pass rclone/config"
|
export RCLONE_PASSWORD_COMMAND="pass rclone/config"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -3177,13 +3177,13 @@ at rest or transfer. Detailed instructions for popular OSes:
|
|||||||
|
|
||||||
- Generate and store a password
|
- Generate and store a password
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
security add-generic-password -a rclone -s config -w $(openssl rand -base64 40)
|
security add-generic-password -a rclone -s config -w $(openssl rand -base64 40)
|
||||||
```
|
```
|
||||||
|
|
||||||
- Add the retrieval instruction to your `.zprofile` / `.profile`
|
- Add the retrieval instruction to your `.zprofile` / `.profile`
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
export RCLONE_PASSWORD_COMMAND="/usr/bin/security find-generic-password -a rclone -s config -w"
|
export RCLONE_PASSWORD_COMMAND="/usr/bin/security find-generic-password -a rclone -s config -w"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -3196,13 +3196,13 @@ at rest or transfer. Detailed instructions for popular OSes:
|
|||||||
|
|
||||||
- Generate and store a password
|
- Generate and store a password
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
echo $(openssl rand -base64 40) | pass insert -m rclone/config
|
echo $(openssl rand -base64 40) | pass insert -m rclone/config
|
||||||
```
|
```
|
||||||
|
|
||||||
- Add the retrieval instruction
|
- Add the retrieval instruction
|
||||||
|
|
||||||
```console
|
```sh
|
||||||
export RCLONE_PASSWORD_COMMAND="/usr/bin/pass rclone/config"
|
export RCLONE_PASSWORD_COMMAND="/usr/bin/pass rclone/config"
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -3210,13 +3210,13 @@ at rest or transfer. Detailed instructions for popular OSes:
|
|||||||
|
|
||||||
- Generate and store a password
|
- Generate and store a password
|
||||||
|
|
||||||
```powershell
|
```pwsh
|
||||||
New-Object -TypeName PSCredential -ArgumentList "rclone", (ConvertTo-SecureString -String ([System.Web.Security.Membership]::GeneratePassword(40, 10)) -AsPlainText -Force) | Export-Clixml -Path "rclone-credential.xml"
|
New-Object -TypeName PSCredential -ArgumentList "rclone", (ConvertTo-SecureString -String ([System.Web.Security.Membership]::GeneratePassword(40, 10)) -AsPlainText -Force) | Export-Clixml -Path "rclone-credential.xml"
|
||||||
```
|
```
|
||||||
|
|
||||||
- Add the password retrieval instruction
|
- Add the password retrieval instruction
|
||||||
|
|
||||||
```powershell
|
```pwsh
|
||||||
[Environment]::SetEnvironmentVariable("RCLONE_PASSWORD_COMMAND", "[System.Runtime.InteropServices.Marshal]::PtrToStringAuto([System.Runtime.InteropServices.Marshal]::SecureStringToBSTR((Import-Clixml -Path "rclone-credential.xml").Password))")
|
[Environment]::SetEnvironmentVariable("RCLONE_PASSWORD_COMMAND", "[System.Runtime.InteropServices.Marshal]::PtrToStringAuto([System.Runtime.InteropServices.Marshal]::SecureStringToBSTR((Import-Clixml -Path "rclone-credential.xml").Password))")
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -3462,7 +3462,7 @@ so it can only contain letters, digits, or the `_` (underscore) character.
 For example, to configure an S3 remote named `mys3:` without a config
 file (using unix ways of setting environment variables):
 
-```console
+```sh
 $ export RCLONE_CONFIG_MYS3_TYPE=s3
 $ export RCLONE_CONFIG_MYS3_ACCESS_KEY_ID=XXX
 $ export RCLONE_CONFIG_MYS3_SECRET_ACCESS_KEY=XXX
@@ -3482,7 +3482,7 @@ You must write the name in uppercase in the environment variable, but
 as seen from example above it will be listed and can be accessed in
 lowercase, while you can also refer to the same remote in uppercase:
 
-```console
+```sh
 $ rclone lsd mys3:
           -1 2016-09-21 12:54:21        -1 my-bucket
 $ rclone lsd MYS3:
@@ -3497,7 +3497,7 @@ set the access key of all remotes using S3, including myS3Crypt.
 Note also that now rclone has [connection strings](#connection-strings),
 it is probably easier to use those instead which makes the above example
 
-```console
+```sh
 rclone lsd :s3,access_key_id=XXX,secret_access_key=XXX:
 ```
 
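Connection strings put the credentials on the command line, so quoting the remote keeps the shell from mangling any special characters in the keys. A sketch, with placeholder credentials and a hypothetical bucket name:

```sh
# XXX values and my-bucket are placeholders:
rclone lsd ":s3,access_key_id=XXX,secret_access_key=XXX:my-bucket"
```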
docs/content/doi.md
@@ -1,7 +1,7 @@
 ---
 title: "DOI"
 description: "Rclone docs for DOI"
-versionIntroduced: "v1.70"
+versionIntroduced: "?"
 ---
 
 # {{< icon "fa fa-building-columns" >}} DOI
@@ -27,7 +27,7 @@ Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
 
 Here is an example of how to make a remote called `remote`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -67,7 +67,7 @@ d) Delete this remote
 y/e/d> y
 ```
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/doi/doi.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/doi/doi.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to doi (DOI datasets).
@@ -188,4 +188,4 @@ will default to those currently in use.
 It doesn't return anything.
 
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
docs/content/downloads.md
@@ -56,13 +56,13 @@ signatures on the release.
 
 To install rclone on Linux/macOS/BSD systems, run:
 
-```console
+```sh
 sudo -v ; curl https://rclone.org/install.sh | sudo bash
 ```
 
 For beta installation, run:
 
-```console
+```sh
 sudo -v ; curl https://rclone.org/install.sh | sudo bash -s beta
 ```
 
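After either script finishes, a quick check confirms the binary is on the PATH and shows which build (stable or beta) was installed:

```sh
rclone version
```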
docs/content/drive.md
@@ -18,7 +18,7 @@ through it.
 
 Here is an example of how to make a remote called `remote`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -110,19 +110,19 @@ You can then use it like this,
 
 List directories in top level of your drive
 
-```console
+```sh
 rclone lsd remote:
 ```
 
 List all the files in your drive
 
-```console
+```sh
 rclone ls remote:
 ```
 
 To copy a local directory to a drive directory called backup
 
-```console
+```sh
 rclone copy /home/source remote:backup
 ```
 
@@ -270,7 +270,7 @@ account key" button.
 
 ##### 3. Configure rclone, assuming a new install
 
-```text
+```sh
 rclone config
 
 n/s/q> n # New
@@ -375,7 +375,7 @@ It will use the `--checkers` value to specify the number of requests to run in
 In tests, these batch requests were up to 20x faster than the regular method.
 Running the following command against different sized folders gives:
 
-```console
+```sh
 rclone lsjson -vv -R --checkers=6 gdrive:folder
 ```
 
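To reproduce such a comparison on your own data, timing the same recursive listing at different concurrency levels is enough. A sketch, reusing the placeholder `gdrive:folder` path from above:

```sh
# Discard the JSON so only the listing time is measured:
time rclone lsjson -R --checkers=1 gdrive:folder > /dev/null
time rclone lsjson -R --checkers=6 gdrive:folder > /dev/null
```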
@@ -598,7 +598,7 @@ Google Documents.
 | url | INI style link file | macOS, Windows |
 | webloc | macOS specific XML format | macOS |
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/drive/drive.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/drive/drive.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to drive (Google Drive).
@@ -1792,7 +1792,7 @@ Third delete all orphaned files to the trash
 rclone backend rescue drive: -o delete
 
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
 
 ## Limitations
 
@@ -1870,12 +1870,7 @@ second that each client_id can do set by Google. rclone already has a
 high quota and I will continue to make sure it is high enough by
 contacting Google.
 
-It is strongly recommended to use your own client ID as the default
-rclone ID is heavily used. If you have multiple services running, it
-is recommended to use an API key for each service. The default Google
-quota is 10 transactions per second so it is recommended to stay under
-that number as if you use more than that, it will cause rclone to rate
-limit and make things slower.
+It is strongly recommended to use your own client ID as the default rclone ID is heavily used. If you have multiple services running, it is recommended to use an API key for each service. The default Google quota is 10 transactions per second so it is recommended to stay under that number as if you use more than that, it will cause rclone to rate limit and make things slower.
 
 Here is how to create your own Google Drive client ID for rclone:
 
@@ -1893,42 +1888,37 @@ be the same account as the Google Drive you want to access)
 credentials", which opens the wizard).
 
 5. If you already configured an "Oauth Consent Screen", then skip
 to the next step; if not, click on "CONFIGURE CONSENT SCREEN" button
 (near the top right corner of the right panel), then click "Get started".
 On the next screen, enter an "Application name"
 ("rclone" is OK); enter "User Support Email" (your own email is OK);
 Next, under Audience select "External". Next enter your own contact information,
 agree to terms and click "Create". You should now see rclone (or your project name)
 in a box in the top left of the screen.
 
 (PS: if you are a GSuite user, you could also select "Internal" instead
 of "External" above, but this will restrict API use to Google Workspace
 users in your organisation).
 
 You will also have to add [some scopes](https://developers.google.com/drive/api/guides/api-specific-auth),
 including
 - `https://www.googleapis.com/auth/docs`
 - `https://www.googleapis.com/auth/drive` in order to be able to edit,
 create and delete files with RClone.
-- `https://www.googleapis.com/auth/drive.metadata.readonly` which you may
-also want to add.
+- `https://www.googleapis.com/auth/drive.metadata.readonly` which you may also want to add.
 
-To do this, click Data Access on the left side panel, click "add or
-remove scopes" and select the three above and press update or go to the
-"Manually add scopes" text box (scroll down) and enter
-"https://www.googleapis.com/auth/docs,https://www.googleapis.com/auth/drive,https://www.googleapis.com/auth/drive.metadata.readonly", press add to table then update.
+To do this, click Data Access on the left side panel, click "add or remove scopes" and select the three above and press update or go to the "Manually add scopes" text box (scroll down) and enter "https://www.googleapis.com/auth/docs,https://www.googleapis.com/auth/drive,https://www.googleapis.com/auth/drive.metadata.readonly", press add to table then update.
 
-You should now see the three scopes on your Data access page. Now press save
-at the bottom!
+You should now see the three scopes on your Data access page. Now press save at the bottom!
 
 6. After adding scopes, click Audience
 Scroll down and click "+ Add users". Add yourself as a test user and press save.
 
-7. Go to Overview on the left panel, click "Create OAuth client". Choose
-an application type of "Desktop app" and click "Create". (the default name is fine)
+7. Go to Overview on the left panel, click "Create OAuth client". Choose an application type of "Desktop app" and click "Create". (the default name is fine)
 
 8. It will show you a client ID and client secret. Make a note of these.
-(If you selected "External" at Step 5 continue to Step 9.
+
+(If you selected "External" at Step 5 continue to Step 9.
 If you chose "Internal" you don't need to publish and can skip straight to
 Step 10 but your destination drive must be part of the same Google Workspace.)
 
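Once the client ID and secret from step 8 are in hand, they can be attached to an existing remote without re-running the interactive wizard. A sketch with placeholder values, assuming the remote is named `gdrive`:

```sh
# Both values below are placeholders for the ones shown in step 8:
rclone config update gdrive client_id XXXX.apps.googleusercontent.com client_secret YYYY
```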
@@ -1951,10 +1941,9 @@ testing mode would also be sufficient.
 
 (Thanks to @balazer on github for these instructions.)
 
-Sometimes, creation of an OAuth consent in Google API Console fails due to an
-error message "The request failed because changes to one of the field of the
-resource is not supported". As a convenient workaround, the necessary Google
-Drive API key can be created on the
-[Python Quickstart](https://developers.google.com/drive/api/v3/quickstart/python)
-page. Just push the Enable the Drive API button to receive the Client ID and Secret.
+Sometimes, creation of an OAuth consent in Google API Console fails due to an error message
+“The request failed because changes to one of the field of the resource is not supported”.
+As a convenient workaround, the necessary Google Drive API key can be created on the
+[Python Quickstart](https://developers.google.com/drive/api/v3/quickstart/python) page.
+Just push the Enable the Drive API button to receive the Client ID and Secret.
 Note that it will automatically create a new project in the API Console.
docs/content/dropbox.md
@@ -19,7 +19,7 @@ through it.
 
 Here is an example of how to make a remote called `remote`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -73,19 +73,19 @@ You can then use it like this,
 
 List directories in top level of your dropbox
 
-```console
+```sh
 rclone lsd remote:
 ```
 
 List all the files in your dropbox
 
-```console
+```sh
 rclone ls remote:
 ```
 
 To copy a local directory to a dropbox directory called backup
 
-```console
+```sh
 rclone copy /home/source remote:backup
 ```
 
@@ -237,7 +237,7 @@ of supported formats at any time.
 | html | HTML | HTML document |
 | md | Markdown | Markdown text format |
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/dropbox/dropbox.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/dropbox/dropbox.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to dropbox (Dropbox).
@@ -590,7 +590,7 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
 
 ## Limitations
 
@@ -604,9 +604,10 @@ issue an error message `File name disallowed - not uploading` if it
 attempts to upload one of those file names, but the sync won't fail.
 
 Some errors may occur if you try to sync copyright-protected files
-because Dropbox has its own [copyright detector](https://techcrunch.com/2014/03/30/how-dropbox-knows-when-youre-sharing-copyrighted-stuff-without-actually-looking-at-your-stuff/)
-that prevents this sort of file being downloaded. This will return the error
-`ERROR : /path/to/your/file: Failed to copy: failed to open source object: path/restricted_content/.`
+because Dropbox has its own [copyright detector](https://techcrunch.com/2014/03/30/how-dropbox-knows-when-youre-sharing-copyrighted-stuff-without-actually-looking-at-your-stuff/) that
+prevents this sort of file being downloaded. This will return the error `ERROR :
+/path/to/your/file: Failed to copy: failed to open source object:
+path/restricted_content/.`
 
 If you have more than 10,000 files in a directory then `rclone purge
 dropbox:dir` will return the error `Failed to purge: There are too
@@ -616,8 +617,7 @@ many files involved in this operation`. As a work-around do an
 When using `rclone link` you'll need to set `--expire` if using a
 non-personal account otherwise the visibility may not be correct.
 (Note that `--expire` isn't supported on personal accounts). See the
-[forum discussion](https://forum.rclone.org/t/rclone-link-dropbox-permissions/23211)
-and the
+[forum discussion](https://forum.rclone.org/t/rclone-link-dropbox-permissions/23211) and the
 [dropbox SDK issue](https://github.com/dropbox/dropbox-sdk-go-unofficial/issues/75).
 
 Modification times for Dropbox Paper documents are not exact, and
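For a non-personal account, then, passing an explicit expiry when creating the link sidesteps the visibility problem. A sketch; the path and the one-day duration are illustrative:

```sh
rclone link --expire 1d remote:path/to/file
```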
@@ -627,34 +627,23 @@ or so, or use `--ignore-times` to force a full sync.
 
 ## Get your own Dropbox App ID
 
-When you use rclone with Dropbox in its default configuration you are using
-rclone's App ID. This is shared between all the rclone users.
+When you use rclone with Dropbox in its default configuration you are using rclone's App ID. This is shared between all the rclone users.
 
 Here is how to create your own Dropbox App ID for rclone:
 
-1. Log into the [Dropbox App console](https://www.dropbox.com/developers/apps/create)
-with your Dropbox Account (It need not to be the same account as the Dropbox you
-want to access)
+1. Log into the [Dropbox App console](https://www.dropbox.com/developers/apps/create) with your Dropbox Account (It need not
+to be the same account as the Dropbox you want to access)
 
 2. Choose an API => Usually this should be `Dropbox API`
 
-3. Choose the type of access you want to use => `Full Dropbox` or `App Folder`.
-If you want to use Team Folders, `Full Dropbox` is required
-([see here](https://www.dropboxforum.com/t5/Dropbox-API-Support-Feedback/How-to-create-team-folder-inside-my-app-s-folder/m-p/601005/highlight/true#M27911)).
+3. Choose the type of access you want to use => `Full Dropbox` or `App Folder`. If you want to use Team Folders, `Full Dropbox` is required ([see here](https://www.dropboxforum.com/t5/Dropbox-API-Support-Feedback/How-to-create-team-folder-inside-my-app-s-folder/m-p/601005/highlight/true#M27911)).
 
 4. Name your App. The app name is global, so you can't use `rclone` for example
 
 5. Click the button `Create App`
 
-6. Switch to the `Permissions` tab. Enable at least the following permissions:
-`account_info.read`, `files.metadata.write`, `files.content.write`, `files.content.read`,
-`sharing.write`. The `files.metadata.read` and `sharing.read` checkboxes will be
-marked too. Click `Submit`
+6. Switch to the `Permissions` tab. Enable at least the following permissions: `account_info.read`, `files.metadata.write`, `files.content.write`, `files.content.read`, `sharing.write`. The `files.metadata.read` and `sharing.read` checkboxes will be marked too. Click `Submit`
 
-7. Switch to the `Settings` tab. Fill `OAuth2 - Redirect URIs` as `http://localhost:53682/`
-and click on `Add`
+7. Switch to the `Settings` tab. Fill `OAuth2 - Redirect URIs` as `http://localhost:53682/` and click on `Add`
 
-8. Find the `App key` and `App secret` values on the `Settings` tab. Use these
-values in rclone config to add a new remote or edit an existing remote.
-The `App key` setting corresponds to `client_id` in rclone config, the
-`App secret` corresponds to `client_secret`
+8. Find the `App key` and `App secret` values on the `Settings` tab. Use these values in rclone config to add a new remote or edit an existing remote. The `App key` setting corresponds to `client_id` in rclone config, the `App secret` corresponds to `client_secret`
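With the `App key` and `App secret` from step 8, the key pair can be put into a new remote definition from the command line (the OAuth authorisation step still follows). A sketch, where `mydropbox` is a hypothetical remote name and the upper-case values are placeholders:

```sh
rclone config create mydropbox dropbox client_id APP_KEY client_secret APP_SECRET
```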
docs/content/faq.md
@@ -33,7 +33,7 @@ If you need to configure a remote, see the [config help docs](/docs/#configure).
 If you are using rclone entirely with [on the fly remotes](/docs/#backend-path-to-dir),
 you can create an empty config file to get rid of this notice, for example:
 
-```console
+```sh
 rclone config touch
 ```
 
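To see where that file lives (the location is platform-dependent), rclone can report the path it is actually using:

```sh
rclone config file
```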
@@ -48,7 +48,7 @@ The syncs would be incremental (on a file by file basis).
 
 e.g.
 
-```console
+```sh
 rclone sync --interactive drive:Folder s3:bucket
 ```
 
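Before letting a sync loose on real data it is worth previewing it; `--dry-run` lists the operations a real run would perform without changing anything. Reusing the paths from the example above:

```sh
rclone sync drive:Folder s3:bucket --dry-run
```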
@@ -57,7 +57,7 @@ rclone sync --interactive drive:Folder s3:bucket
 You can use rclone from multiple places at the same time if you choose
 different subdirectory for the output, e.g.
 
-```console
+```sh
 Server A> rclone sync --interactive /tmp/whatever remote:ServerA
 Server B> rclone sync --interactive /tmp/whatever remote:ServerB
 ```
@@ -65,7 +65,7 @@ Server B> rclone sync --interactive /tmp/whatever remote:ServerB
 If you sync to the same directory then you should use rclone copy
 otherwise the two instances of rclone may delete each other's files, e.g.
 
-```console
+```sh
 Server A> rclone copy /tmp/whatever remote:Backup
 Server B> rclone copy /tmp/whatever remote:Backup
 ```
@@ -119,7 +119,7 @@ may use `http_proxy` but another one `HTTP_PROXY`. The `Go` libraries
 used by `rclone` will try both variations, but you may wish to set all
 possibilities. So, on Linux, you may end up with code similar to
 
-```console
+```sh
 export http_proxy=http://proxyserver:12345
 export https_proxy=$http_proxy
 export HTTP_PROXY=$http_proxy
@@ -128,7 +128,7 @@ export HTTPS_PROXY=$http_proxy
 
 Note: If the proxy server requires a username and password, then use
 
-```console
+```sh
 export http_proxy=http://username:password@proxyserver:12345
 export https_proxy=$http_proxy
 export HTTP_PROXY=$http_proxy
@@ -141,7 +141,7 @@ For instance "foo.com" also matches "bar.foo.com".
 
 e.g.
 
-```console
+```sh
 export no_proxy=localhost,127.0.0.0/8,my.host.name
 export NO_PROXY=$no_proxy
 ```
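A quick way to confirm which of these variables are set in the shell rclone will inherit is plain environment inspection, nothing rclone-specific:

```sh
env | grep -i proxy
```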
@@ -170,7 +170,7 @@ where `rclone` can't verify the server with the SSL root certificates.
 Rclone (via the Go runtime) tries to load the root certificates from
 these places on Linux.
 
-```text
+```sh
 "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc.
 "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL
 "/etc/ssl/ca-bundle.pem", // OpenSUSE
@@ -180,7 +180,7 @@ these places on Linux.
 So doing something like this should fix the problem. It also sets the
 time which is important for SSL to work properly.
 
-```console
+```sh
 mkdir -p /etc/ssl/certs/
 curl -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
 ntpclient -s -h pool.ntp.org
@@ -193,7 +193,7 @@ provide the SSL root certificates on Unix systems other than macOS.
 Note that you may need to add the `--insecure` option to the `curl` command line
 if it doesn't work without.
 
-```console
+```sh
 curl --insecure -o /etc/ssl/certs/ca-certificates.crt https://raw.githubusercontent.com/bagder/ca-bundle/master/ca-bundle.crt
 ```
 
@@ -202,7 +202,7 @@ On macOS, you can install
 Homebrew, and specify the SSL root certificates with the
 [--ca-cert](/docs/#ca-cert-stringarray) flag.
 
-```console
+```sh
 brew install ca-certificates
 find $(brew --prefix)/etc/ca-certificates -type f
 ```
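Whatever bundle path the `find` above prints can then be passed straight to rclone. A sketch; the exact file name varies by machine, so substitute the path from your own output:

```sh
rclone lsd remote: --ca-cert "$(brew --prefix)/etc/ca-certificates/cert.pem"
```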
@@ -260,7 +260,7 @@ the port on the host.
 
 A simple solution may be restarting the Host Network Service with eg. Powershell
 
-```powershell
+```pwsh
 Restart-Service hns
 ```
 
docs/content/fichier.md
@@ -21,7 +21,7 @@ which you need to do in your browser.
 
 Here is an example of how to make a remote called `remote`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -64,25 +64,23 @@ d) Delete this remote
 y/e/d> y
 ```
 
-Once configured you can then use `rclone` like this (replace `remote` with the
-name you gave your remote):
+Once configured you can then use `rclone` like this,
 
 List directories in top level of your 1Fichier account
 
-```console
+```sh
 rclone lsd remote:
 ```
 
 List all the files in your 1Fichier account
 
-```console
+```sh
 rclone ls remote:
 ```
 
 To copy a local directory to a 1Fichier directory called backup
 
-```console
+```sh
 rclone copy /home/source remote:backup
 ```
 
@@ -124,7 +122,7 @@ name:
 Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
 as they can't be used in JSON strings.
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/fichier/fichier.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/fichier/fichier.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to fichier (1Fichier).
@@ -216,7 +214,7 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
 
 ## Limitations
 
@@ -225,5 +223,5 @@ this capability cannot determine free space for an rclone mount or
 use policy `mfs` (most free space) as a member of an rclone union
 remote.
 
-See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
-and [rclone about](https://rclone.org/commands/rclone_about/).
+See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
docs/content/filefabric.md
@@ -19,7 +19,7 @@ do in your browser. `rclone config` walks you through it.
 
 Here is an example of how to make a remote called `remote`. First run:
 
-```console
+```sh
 rclone config
 ```
 
@@ -89,24 +89,23 @@ d) Delete this remote
 y/e/d> y
 ```
 
-Once configured you can then use `rclone` like this (replace `remote` with the
-name you gave your remote):
+Once configured you can then use `rclone` like this,
 
 List directories in top level of your Enterprise File Fabric
 
-```console
+```sh
 rclone lsd remote:
 ```
 
 List all the files in your Enterprise File Fabric
 
-```console
+```sh
 rclone ls remote:
 ```
 
 To copy a local directory to an Enterprise File Fabric directory called backup
 
-```console
+```sh
 rclone copy /home/source remote:backup
 ```
 
@@ -149,7 +148,7 @@ In order to do this you will have to find the `Folder ID` of the
 directory you wish rclone to display. These aren't displayed in the
 web interface, but you can use `rclone lsf` to find them, for example
 
-```console
+```sh
 $ rclone lsf --dirs-only -Fip --csv filefabric:
 120673758,Burnt PDFs/
 120673759,My Quick Uploads/
@@ -161,7 +160,7 @@ $ rclone lsf --dirs-only -Fip --csv filefabric:
 
 The ID for "S3 Storage" would be `120673761`.
 
-<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/filefabric/filefabric.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/filefabric/filefabric.go then run make backenddocs" >}}
 ### Standard options
 
 Here are the Standard options specific to filefabric (Enterprise File Fabric).
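That ID can then be used to scope the remote, either via the `root_folder_id` option in the config or ad hoc on the command line. A sketch using the ID found above:

```sh
rclone lsf filefabric: --filefabric-root-folder-id 120673761
```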
@@ -294,4 +293,4 @@ Properties:
 - Type: string
 - Required: false
 
-<!-- autogenerated options stop -->
+{{< rem autogenerated options stop >}}
Some files were not shown because too many files have changed in this diff.