mirror of
https://github.com/rclone/rclone.git
synced 2026-02-03 10:13:22 +00:00
Compare commits
35 Commits
v1.63-stab
...
pr-6561-vf
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
064b9af95a | ||
|
|
5910ba5aa9 | ||
|
|
0c64075f57 | ||
|
|
3404eb0444 | ||
|
|
13e5701f2a | ||
|
|
432d5d1e20 | ||
|
|
cc05159518 | ||
|
|
119ccb2b95 | ||
|
|
0ef0e908ca | ||
|
|
0063d14dbb | ||
|
|
0d34efb10f | ||
|
|
415f4b2b93 | ||
|
|
07cf5f1d25 | ||
|
|
7d31956169 | ||
|
|
473d443874 | ||
|
|
e294b76121 | ||
|
|
8f3c583870 | ||
|
|
d0d41fe847 | ||
|
|
297f15a3e3 | ||
|
|
d5f0affd4b | ||
|
|
0598aafbfd | ||
|
|
528e22f139 | ||
|
|
f1a8420814 | ||
|
|
e250f1afcd | ||
|
|
ebf24c9872 | ||
|
|
b4c7b240d8 | ||
|
|
22a14a8c98 | ||
|
|
07133b892d | ||
|
|
a8ca18165e | ||
|
|
8c4e71fc84 | ||
|
|
351e2db2ef | ||
|
|
2234feb23d | ||
|
|
fb5125ecee | ||
|
|
e8cbc54a06 | ||
|
|
00512e1303 |
35
.github/workflows/build.yml
vendored
35
.github/workflows/build.yml
vendored
@@ -27,12 +27,12 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
|
||||
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
|
||||
|
||||
include:
|
||||
- job_name: linux
|
||||
os: ubuntu-latest
|
||||
go: '1.20'
|
||||
go: '1.21.0-rc.3'
|
||||
gotags: cmount
|
||||
build_flags: '-include "^linux/"'
|
||||
check: true
|
||||
@@ -43,14 +43,14 @@ jobs:
|
||||
|
||||
- job_name: linux_386
|
||||
os: ubuntu-latest
|
||||
go: '1.20'
|
||||
go: '1.21.0-rc.3'
|
||||
goarch: 386
|
||||
gotags: cmount
|
||||
quicktest: true
|
||||
|
||||
- job_name: mac_amd64
|
||||
os: macos-11
|
||||
go: '1.20'
|
||||
go: '1.21.0-rc.3'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/amd64" -cgo'
|
||||
quicktest: true
|
||||
@@ -59,14 +59,14 @@ jobs:
|
||||
|
||||
- job_name: mac_arm64
|
||||
os: macos-11
|
||||
go: '1.20'
|
||||
go: '1.21.0-rc.3'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||
deploy: true
|
||||
|
||||
- job_name: windows
|
||||
os: windows-latest
|
||||
go: '1.20'
|
||||
go: '1.21.0-rc.3'
|
||||
gotags: cmount
|
||||
cgo: '0'
|
||||
build_flags: '-include "^windows/"'
|
||||
@@ -76,23 +76,23 @@ jobs:
|
||||
|
||||
- job_name: other_os
|
||||
os: ubuntu-latest
|
||||
go: '1.20'
|
||||
go: '1.21.0-rc.3'
|
||||
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
||||
compile_all: true
|
||||
deploy: true
|
||||
|
||||
- job_name: go1.18
|
||||
os: ubuntu-latest
|
||||
go: '1.18'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.19
|
||||
os: ubuntu-latest
|
||||
go: '1.19'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.20
|
||||
os: ubuntu-latest
|
||||
go: '1.20'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
name: ${{ matrix.job_name }}
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
@@ -130,6 +130,11 @@ jobs:
|
||||
- name: Install Libraries on macOS
|
||||
shell: bash
|
||||
run: |
|
||||
# https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
|
||||
# https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
|
||||
unset HOMEBREW_NO_INSTALL_FROM_API
|
||||
brew untap --force homebrew/core
|
||||
brew untap --force homebrew/cask
|
||||
brew update
|
||||
brew install --cask macfuse
|
||||
if: matrix.os == 'macos-11'
|
||||
@@ -239,7 +244,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.21.0-rc.3'
|
||||
check-latest: true
|
||||
|
||||
- name: Install govulncheck
|
||||
@@ -264,7 +269,7 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.20'
|
||||
go-version: '1.21.0-rc.3'
|
||||
|
||||
- name: Go module cache
|
||||
uses: actions/cache@v3
|
||||
|
||||
348
MANUAL.html
generated
348
MANUAL.html
generated
File diff suppressed because it is too large
Load Diff
104
MANUAL.md
generated
104
MANUAL.md
generated
@@ -1,6 +1,6 @@
|
||||
% rclone(1) User Manual
|
||||
% Nick Craig-Wood
|
||||
% Jul 17, 2023
|
||||
% Jun 30, 2023
|
||||
|
||||
# Rclone syncs your files to cloud storage
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
|
||||
Rclone is a command-line program to manage files on cloud storage. It
|
||||
is a feature-rich alternative to cloud vendors' web storage
|
||||
interfaces. [Over 70 cloud storage products](#providers) support
|
||||
interfaces. [Over 40 cloud storage products](#providers) support
|
||||
rclone including S3 object stores, business & consumer file storage
|
||||
services, as well as standard transfer protocols.
|
||||
|
||||
@@ -2723,40 +2723,6 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
|
||||
|
||||
* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
# rclone config redacted
|
||||
|
||||
Print redacted (decrypted) config file, or the redacted config for a single remote.
|
||||
|
||||
# Synopsis
|
||||
|
||||
This prints a redacted copy of the config file, either the
|
||||
whole config file or for a given remote.
|
||||
|
||||
The config file will be redacted by replacing all passwords and other
|
||||
sensitive info with XXX.
|
||||
|
||||
This makes the config file suitable for posting online for support.
|
||||
|
||||
It should be double checked before posting as the redaction may not be perfect.
|
||||
|
||||
|
||||
|
||||
```
|
||||
rclone config redacted [<remote>] [flags]
|
||||
```
|
||||
|
||||
# Options
|
||||
|
||||
```
|
||||
-h, --help help for redacted
|
||||
```
|
||||
|
||||
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
|
||||
|
||||
# SEE ALSO
|
||||
|
||||
* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.
|
||||
|
||||
# rclone config show
|
||||
|
||||
Print (decrypted) config file, or the config for a single remote.
|
||||
@@ -11886,7 +11852,7 @@ all files on `remote:` excluding those in root directory `dir` and sub
|
||||
directories.
|
||||
|
||||
E.g. on Microsoft Windows `rclone ls remote: --exclude "*\[{JP,KR,HK}\]*"`
|
||||
lists the files in `remote:` without `[JP]` or `[KR]` or `[HK]` in
|
||||
lists the files in `remote:` with `[JP]` or `[KR]` or `[HK]` in
|
||||
their name. Quotes prevent the shell from interpreting the `\`
|
||||
characters.`\` characters escape the `[` and `]` so an rclone filter
|
||||
treats them literally rather than as a character-range. The `{` and `}`
|
||||
@@ -15228,7 +15194,7 @@ These flags are available for every command.
|
||||
--use-json-log Use json log format
|
||||
--use-mmap Use mmap allocator (see docs)
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.63.1")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.63.0")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
```
|
||||
|
||||
@@ -27924,7 +27890,7 @@ to be the same account as the Dropbox you want to access)
|
||||
|
||||
6. Switch to the `Permissions` tab. Enable at least the following permissions: `account_info.read`, `files.metadata.write`, `files.content.write`, `files.content.read`, `sharing.write`. The `files.metadata.read` and `sharing.read` checkboxes will be marked too. Click `Submit`
|
||||
|
||||
7. Switch to the `Settings` tab. Fill `OAuth2 - Redirect URIs` as `http://localhost:53682/` and click on `Add`
|
||||
7. Switch to the `Settings` tab. Fill `OAuth2 - Redirect URIs` as `http://localhost:53682/`
|
||||
|
||||
8. Find the `App key` and `App secret` values on the `Settings` tab. Use these values in rclone config to add a new remote or edit an existing remote. The `App key` setting corresponds to `client_id` in rclone config, the `App secret` corresponds to `client_secret`
|
||||
|
||||
@@ -30908,7 +30874,7 @@ be the same account as the Google Drive you want to access)
|
||||
"Google Drive API".
|
||||
|
||||
4. Click "Credentials" in the left-side panel (not "Create
|
||||
credentials", which opens the wizard).
|
||||
credentials", which opens the wizard), then "Create credentials"
|
||||
|
||||
5. If you already configured an "Oauth Consent Screen", then skip
|
||||
to the next step; if not, click on "CONFIGURE CONSENT SCREEN" button
|
||||
@@ -33730,6 +33696,8 @@ y/e/d> y
|
||||
|
||||
[Mail.ru Cloud](https://cloud.mail.ru/) is a cloud storage provided by a Russian internet company [Mail.Ru Group](https://mail.ru). The official desktop client is [Disk-O:](https://disk-o.cloud/en), available on Windows and Mac OS.
|
||||
|
||||
Currently it is recommended to disable 2FA on Mail.ru accounts intended for rclone until it gets eventually implemented.
|
||||
|
||||
## Features highlights
|
||||
|
||||
- Paths may be as deep as required, e.g. `remote:directory/subdirectory`
|
||||
@@ -43602,27 +43570,6 @@ Options:
|
||||
|
||||
# Changelog
|
||||
|
||||
## v1.63.1 - 2023-07-17
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.63.0...v1.63.1)
|
||||
|
||||
* Bug Fixes
|
||||
* build: Fix macos builds for versions < 12 (Anagh Kumar Baranwal)
|
||||
* dirtree: Fix performance with large directories of directories and `--fast-list` (Nick Craig-Wood)
|
||||
* operations
|
||||
* Fix deadlock when using `lsd`/`ls` with `--progress` (Nick Craig-Wood)
|
||||
* Fix `.rclonelink` files not being converted back to symlinks (Nick Craig-Wood)
|
||||
* doc fixes (Dean Attali, Mahad, Nick Craig-Wood, Sawada Tsunayoshi, Vladislav Vorobev)
|
||||
* Local
|
||||
* Fix partial directory read for corrupted filesystem (Nick Craig-Wood)
|
||||
* Box
|
||||
* Fix reconnect failing with HTTP 400 Bad Request (albertony)
|
||||
* Smb
|
||||
* Fix "Statfs failed: bucket or container name is needed" when mounting (Nick Craig-Wood)
|
||||
* WebDAV
|
||||
* Nextcloud: fix must use /dav/files/USER endpoint not /webdav error (Paul)
|
||||
* Nextcloud chunking: add more guidance for the user to check the config (darix)
|
||||
|
||||
## v1.63.0 - 2023-06-30
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.62.0...v1.63.0)
|
||||
@@ -49286,43 +49233,32 @@ put them back in again.` >}}
|
||||
* zzq <i@zhangzqs.cn>
|
||||
* mac-15 <usman.ilamdin@phpstudios.com>
|
||||
|
||||
# Contact the rclone project
|
||||
# Contact the rclone project #
|
||||
|
||||
## Forum
|
||||
## Forum ##
|
||||
|
||||
Forum for questions and general discussion:
|
||||
|
||||
- https://forum.rclone.org
|
||||
* https://forum.rclone.org
|
||||
|
||||
## Business support
|
||||
|
||||
For business support or sponsorship enquiries please see:
|
||||
|
||||
- https://rclone.com/
|
||||
- sponsorship@rclone.com
|
||||
|
||||
## GitHub repository
|
||||
## GitHub repository ##
|
||||
|
||||
The project's repository is located at:
|
||||
|
||||
- https://github.com/rclone/rclone
|
||||
* https://github.com/rclone/rclone
|
||||
|
||||
There you can file bug reports or contribute with pull requests.
|
||||
|
||||
## Twitter
|
||||
## Twitter ##
|
||||
|
||||
You can also follow Nick on twitter for rclone announcements:
|
||||
You can also follow me on twitter for rclone announcements:
|
||||
|
||||
- [@njcw](https://twitter.com/njcw)
|
||||
* [@njcw](https://twitter.com/njcw)
|
||||
|
||||
## Email
|
||||
## Email ##
|
||||
|
||||
Or if all else fails or you want to ask something private or
|
||||
confidential
|
||||
|
||||
- info@rclone.com
|
||||
|
||||
Please don't email requests for help to this address - those are
|
||||
better directed to the forum unless you'd like to sign up for business
|
||||
support.
|
||||
confidential email [Nick Craig-Wood](mailto:nick@craig-wood.com).
|
||||
Please don't email me requests for help - those are better directed to
|
||||
the forum. Thanks!
|
||||
|
||||
|
||||
91
MANUAL.txt
generated
91
MANUAL.txt
generated
@@ -1,6 +1,6 @@
|
||||
rclone(1) User Manual
|
||||
Nick Craig-Wood
|
||||
Jul 17, 2023
|
||||
Jun 30, 2023
|
||||
|
||||
Rclone syncs your files to cloud storage
|
||||
|
||||
@@ -16,7 +16,7 @@ About rclone
|
||||
|
||||
Rclone is a command-line program to manage files on cloud storage. It is
|
||||
a feature-rich alternative to cloud vendors' web storage interfaces.
|
||||
Over 70 cloud storage products support rclone including S3 object
|
||||
Over 40 cloud storage products support rclone including S3 object
|
||||
stores, business & consumer file storage services, as well as standard
|
||||
transfer protocols.
|
||||
|
||||
@@ -2481,36 +2481,6 @@ SEE ALSO
|
||||
|
||||
- rclone config - Enter an interactive configuration session.
|
||||
|
||||
rclone config redacted
|
||||
|
||||
Print redacted (decrypted) config file, or the redacted config for a
|
||||
single remote.
|
||||
|
||||
Synopsis
|
||||
|
||||
This prints a redacted copy of the config file, either the whole config
|
||||
file or for a given remote.
|
||||
|
||||
The config file will be redacted by replacing all passwords and other
|
||||
sensitive info with XXX.
|
||||
|
||||
This makes the config file suitable for posting online for support.
|
||||
|
||||
It should be double checked before posting as the redaction may not be
|
||||
perfect.
|
||||
|
||||
rclone config redacted [<remote>] [flags]
|
||||
|
||||
Options
|
||||
|
||||
-h, --help help for redacted
|
||||
|
||||
See the global flags page for global options not listed here.
|
||||
|
||||
SEE ALSO
|
||||
|
||||
- rclone config - Enter an interactive configuration session.
|
||||
|
||||
rclone config show
|
||||
|
||||
Print (decrypted) config file, or the config for a single remote.
|
||||
@@ -11425,7 +11395,7 @@ all files on remote: excluding those in root directory dir and sub
|
||||
directories.
|
||||
|
||||
E.g. on Microsoft Windows rclone ls remote: --exclude "*\[{JP,KR,HK}\]*"
|
||||
lists the files in remote: without [JP] or [KR] or [HK] in their name.
|
||||
lists the files in remote: with [JP] or [KR] or [HK] in their name.
|
||||
Quotes prevent the shell from interpreting the \ characters.\ characters
|
||||
escape the [ and ] so an rclone filter treats them literally rather than
|
||||
as a character-range. The { and } define an rclone pattern list. For
|
||||
@@ -14768,7 +14738,7 @@ These flags are available for every command.
|
||||
--use-json-log Use json log format
|
||||
--use-mmap Use mmap allocator (see docs)
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.63.1")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.63.0")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
|
||||
Backend Flags
|
||||
@@ -27270,7 +27240,7 @@ Here is how to create your own Dropbox App ID for rclone:
|
||||
Click Submit
|
||||
|
||||
7. Switch to the Settings tab. Fill OAuth2 - Redirect URIs as
|
||||
http://localhost:53682/ and click on Add
|
||||
http://localhost:53682/
|
||||
|
||||
8. Find the App key and App secret values on the Settings tab. Use
|
||||
these values in rclone config to add a new remote or edit an
|
||||
@@ -30268,7 +30238,7 @@ Here is how to create your own Google Drive client ID for rclone:
|
||||
"Google Drive API".
|
||||
|
||||
4. Click "Credentials" in the left-side panel (not "Create
|
||||
credentials", which opens the wizard).
|
||||
credentials", which opens the wizard), then "Create credentials"
|
||||
|
||||
5. If you already configured an "Oauth Consent Screen", then skip to
|
||||
the next step; if not, click on "CONFIGURE CONSENT SCREEN" button
|
||||
@@ -33131,6 +33101,9 @@ Mail.ru Cloud is a cloud storage provided by a Russian internet company
|
||||
Mail.Ru Group. The official desktop client is Disk-O:, available on
|
||||
Windows and Mac OS.
|
||||
|
||||
Currently it is recommended to disable 2FA on Mail.ru accounts intended
|
||||
for rclone until it gets eventually implemented.
|
||||
|
||||
Features highlights
|
||||
|
||||
- Paths may be as deep as required, e.g. remote:directory/subdirectory
|
||||
@@ -43095,35 +43068,6 @@ Options:
|
||||
|
||||
Changelog
|
||||
|
||||
v1.63.1 - 2023-07-17
|
||||
|
||||
See commits
|
||||
|
||||
- Bug Fixes
|
||||
- build: Fix macos builds for versions < 12 (Anagh Kumar Baranwal)
|
||||
- dirtree: Fix performance with large directories of directories
|
||||
and --fast-list (Nick Craig-Wood)
|
||||
- operations
|
||||
- Fix deadlock when using lsd/ls with --progress (Nick
|
||||
Craig-Wood)
|
||||
- Fix .rclonelink files not being converted back to symlinks
|
||||
(Nick Craig-Wood)
|
||||
- doc fixes (Dean Attali, Mahad, Nick Craig-Wood, Sawada
|
||||
Tsunayoshi, Vladislav Vorobev)
|
||||
- Local
|
||||
- Fix partial directory read for corrupted filesystem (Nick
|
||||
Craig-Wood)
|
||||
- Box
|
||||
- Fix reconnect failing with HTTP 400 Bad Request (albertony)
|
||||
- Smb
|
||||
- Fix "Statfs failed: bucket or container name is needed" when
|
||||
mounting (Nick Craig-Wood)
|
||||
- WebDAV
|
||||
- Nextcloud: fix must use /dav/files/USER endpoint not /webdav
|
||||
error (Paul)
|
||||
- Nextcloud chunking: add more guidance for the user to check the
|
||||
config (darix)
|
||||
|
||||
v1.63.0 - 2023-06-30
|
||||
|
||||
See commits
|
||||
@@ -50216,13 +50160,6 @@ Forum for questions and general discussion:
|
||||
|
||||
- https://forum.rclone.org
|
||||
|
||||
Business support
|
||||
|
||||
For business support or sponsorship enquiries please see:
|
||||
|
||||
- https://rclone.com/
|
||||
- sponsorship@rclone.com
|
||||
|
||||
GitHub repository
|
||||
|
||||
The project's repository is located at:
|
||||
@@ -50233,16 +50170,12 @@ There you can file bug reports or contribute with pull requests.
|
||||
|
||||
Twitter
|
||||
|
||||
You can also follow Nick on twitter for rclone announcements:
|
||||
You can also follow me on twitter for rclone announcements:
|
||||
|
||||
- [@njcw](https://twitter.com/njcw)
|
||||
|
||||
Email
|
||||
|
||||
Or if all else fails or you want to ask something private or
|
||||
confidential
|
||||
|
||||
- info@rclone.com
|
||||
|
||||
Please don't email requests for help to this address - those are better
|
||||
directed to the forum unless you'd like to sign up for business support.
|
||||
confidential email Nick Craig-Wood. Please don't email me requests for
|
||||
help - those are better directed to the forum. Thanks!
|
||||
|
||||
@@ -51,6 +51,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
|
||||
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
|
||||
* Leviia [:page_facing_up:](https://rclone.org/s3/#leviia)
|
||||
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
|
||||
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
|
||||
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
|
||||
@@ -84,6 +85,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
|
||||
* Storj [:page_facing_up:](https://rclone.org/storj/)
|
||||
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
|
||||
* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
|
||||
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
|
||||
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
|
||||
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//go:build !plan9 && !solaris && !js && go1.18
|
||||
// +build !plan9,!solaris,!js,go1.18
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
|
||||
package azureblob
|
||||
@@ -95,6 +95,7 @@ Leave blank to use SAS URL or Emulator, otherwise it needs to be set.
|
||||
If this is blank and if env_auth is set it will be read from the
|
||||
environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "env_auth",
|
||||
Help: `Read credentials from runtime (environment variables, CLI or MSI).
|
||||
@@ -106,11 +107,13 @@ See the [authentication docs](/azureblob#authentication) for full info.`,
|
||||
Help: `Storage Account Shared Key.
|
||||
|
||||
Leave blank to use SAS URL or Emulator.`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sas_url",
|
||||
Help: `SAS URL for container level access only.
|
||||
|
||||
Leave blank if using account/key or Emulator.`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "tenant",
|
||||
Help: `ID of the service principal's tenant. Also called its directory ID.
|
||||
@@ -120,6 +123,7 @@ Set this if using
|
||||
- Service principal with certificate
|
||||
- User with username and password
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "client_id",
|
||||
Help: `The ID of the client in use.
|
||||
@@ -129,6 +133,7 @@ Set this if using
|
||||
- Service principal with certificate
|
||||
- User with username and password
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "client_secret",
|
||||
Help: `One of the service principal's client secrets
|
||||
@@ -136,6 +141,7 @@ Set this if using
|
||||
Set this if using
|
||||
- Service principal with client secret
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "client_certificate_path",
|
||||
Help: `Path to a PEM or PKCS12 certificate file including the private key.
|
||||
@@ -173,7 +179,8 @@ Optionally set this if using
|
||||
Set this if using
|
||||
- User with username and password
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: `The user's password
|
||||
@@ -216,17 +223,20 @@ msi_client_id, or msi_mi_res_id parameters.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "msi_object_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
Name: "msi_object_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "msi_client_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
Name: "msi_client_id",
|
||||
Help: "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "msi_mi_res_id",
|
||||
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||
Advanced: true,
|
||||
Name: "msi_mi_res_id",
|
||||
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "use_emulator",
|
||||
Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//go:build !plan9 && !solaris && !js && go1.18
|
||||
// +build !plan9,!solaris,!js,go1.18
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package azureblob
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Test AzureBlob filesystem interface
|
||||
|
||||
//go:build !plan9 && !solaris && !js && go1.18
|
||||
// +build !plan9,!solaris,!js,go1.18
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package azureblob
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Build for azureblob for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
//go:build plan9 || solaris || js || !go1.18
|
||||
// +build plan9 solaris js !go1.18
|
||||
//go:build plan9 || solaris || js
|
||||
// +build plan9 solaris js
|
||||
|
||||
package azureblob
|
||||
|
||||
@@ -75,13 +75,15 @@ func init() {
|
||||
Description: "Backblaze B2",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "account",
|
||||
Help: "Account ID or Application Key ID.",
|
||||
Required: true,
|
||||
Name: "account",
|
||||
Help: "Account ID or Application Key ID.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "Application Key.",
|
||||
Required: true,
|
||||
Name: "key",
|
||||
Help: "Application Key.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for the service.\n\nLeave blank normally.",
|
||||
|
||||
@@ -107,16 +107,18 @@ func init() {
|
||||
return nil, nil
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "0",
|
||||
Advanced: true,
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "0",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "box_config_file",
|
||||
Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "access_token",
|
||||
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
||||
Name: "access_token",
|
||||
Help: "Box App Primary Access Token\n\nLeave blank normally.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "box_sub_type",
|
||||
Default: "user",
|
||||
|
||||
14
backend/cache/cache.go
vendored
14
backend/cache/cache.go
vendored
@@ -76,17 +76,19 @@ func init() {
|
||||
Name: "plex_url",
|
||||
Help: "The URL of the Plex server.",
|
||||
}, {
|
||||
Name: "plex_username",
|
||||
Help: "The username of the Plex user.",
|
||||
Name: "plex_username",
|
||||
Help: "The username of the Plex user.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "plex_password",
|
||||
Help: "The password of the Plex user.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "plex_token",
|
||||
Help: "The plex token for authentication - auto set normally.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
Name: "plex_token",
|
||||
Help: "The plex token for authentication - auto set normally.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "plex_insecure",
|
||||
Help: "Skip all certificate verification when connecting to the Plex server.",
|
||||
|
||||
4
backend/cache/cache_upload_test.go
vendored
4
backend/cache/cache_upload_test.go
vendored
@@ -160,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
randInstance := rand.New(rand.NewSource(time.Now().Unix()))
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
size := int64(randInstance.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
|
||||
@@ -277,20 +277,23 @@ Leave blank normally.
|
||||
Fill in to access "Computers" folders (see docs), or for rclone to use
|
||||
a non root folder as its starting point.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Shared Drive (Team Drive).",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Name: "team_drive",
|
||||
Help: "ID of the Shared Drive (Team Drive).",
|
||||
Hide: fs.OptionHideConfigurator,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "auth_owner_only",
|
||||
Default: false,
|
||||
@@ -416,10 +419,11 @@ date is used.`,
|
||||
Help: "Size of listing chunk 100-1000, 0 to disable.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: `Impersonate this user when using a service account.`,
|
||||
Advanced: true,
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: `Impersonate this user when using a service account.`,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "alternate_export",
|
||||
Default: false,
|
||||
@@ -592,7 +596,8 @@ Note also that opening the folder once in the web interface (with the
|
||||
user you've authenticated rclone with) seems to be enough so that the
|
||||
resource key is no needed.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
|
||||
@@ -182,8 +182,9 @@ client_secret) to use this option as currently rclone's default set of
|
||||
permissions doesn't include "members.read". This can be added once
|
||||
v1.55 or later is in use everywhere.
|
||||
`,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "shared_files",
|
||||
Help: `Instructs rclone to work on individual shared files.
|
||||
|
||||
@@ -38,8 +38,9 @@ func init() {
|
||||
Description: "1Fichier",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
||||
Name: "api_key",
|
||||
Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
|
||||
Name: "api_key",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Help: "If you want to download a shared folder, add this parameter.",
|
||||
Name: "shared_folder",
|
||||
|
||||
@@ -84,6 +84,7 @@ Leave blank normally.
|
||||
|
||||
Fill in to make rclone start with directory of a given ID.
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "permanent_token",
|
||||
Help: `Permanent Authentication Token.
|
||||
@@ -97,6 +98,7 @@ These tokens are normally valid for several years.
|
||||
|
||||
For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "token",
|
||||
Help: `Session Token.
|
||||
@@ -106,7 +108,8 @@ usually valid for 1 hour.
|
||||
|
||||
Don't set this value - rclone will set it automatically.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "token_expiry",
|
||||
Help: `Token expiry time.
|
||||
|
||||
@@ -48,13 +48,15 @@ func init() {
|
||||
Description: "FTP",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "FTP username.",
|
||||
Default: currentUser,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port number.",
|
||||
|
||||
@@ -91,18 +91,21 @@ func init() {
|
||||
})
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "project_number",
|
||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Name: "project_number",
|
||||
Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user_project",
|
||||
Help: "User project.\n\nOptional - needed only for requester pays.",
|
||||
Name: "user_project",
|
||||
Help: "User project.\n\nOptional - needed only for requester pays.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
|
||||
}, {
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Name: "service_account_credentials",
|
||||
Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "anonymous",
|
||||
Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
|
||||
|
||||
@@ -19,9 +19,10 @@ func init() {
|
||||
Description: "Hadoop distributed file system",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "namenode",
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Required: true,
|
||||
Name: "namenode",
|
||||
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "username",
|
||||
Help: "Hadoop user name.",
|
||||
@@ -29,6 +30,7 @@ func init() {
|
||||
Value: "root",
|
||||
Help: "Connect to hdfs as root.",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "service_principal_name",
|
||||
Help: `Kerberos service principal name for the namenode.
|
||||
@@ -36,7 +38,8 @@ func init() {
|
||||
Enables KERBEROS authentication. Specifies the Service Principal Name
|
||||
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
|
||||
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "data_transfer_protection",
|
||||
Help: `Kerberos data transfer protection: authentication|integrity|privacy.
|
||||
|
||||
@@ -133,11 +133,13 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
|
||||
},
|
||||
|
||||
Options: []fs.Option{{
|
||||
Name: "access_key_id",
|
||||
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
|
||||
Name: "access_key_id",
|
||||
Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
|
||||
Name: "secret_access_key",
|
||||
Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
// their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
|
||||
Name: "endpoint",
|
||||
|
||||
@@ -74,6 +74,10 @@ const (
|
||||
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
|
||||
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
|
||||
tele2CloudClientID = "desktop"
|
||||
|
||||
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
|
||||
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
|
||||
onlimeCloudClientID = "desktop"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -84,7 +88,7 @@ func init() {
|
||||
Description: "Jottacloud",
|
||||
NewFs: NewFs,
|
||||
Config: Config,
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "md5_memory_limit",
|
||||
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
|
||||
Default: fs.SizeSuffix(10 * 1024 * 1024),
|
||||
@@ -119,7 +123,7 @@ func init() {
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeWin | // :?"*<>|
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -139,6 +143,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
}, {
|
||||
Value: "tele2",
|
||||
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
|
||||
}, {
|
||||
Value: "onlime",
|
||||
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
|
||||
}})
|
||||
case "auth_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
@@ -261,6 +268,21 @@ machines.`)
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "onlime": // onlime cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, onlimeCloudClientID)
|
||||
m.Set(configTokenURL, onlimeCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
},
|
||||
ClientID: onlimeCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "choose_device":
|
||||
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
|
||||
Choosing no, the default, will let you access the storage used for the archive
|
||||
|
||||
@@ -61,9 +61,10 @@ func init() {
|
||||
Default: true,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Your user name.",
|
||||
Required: true,
|
||||
Name: "user",
|
||||
Help: "Your user name.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
|
||||
|
||||
@@ -85,10 +85,11 @@ func init() {
|
||||
Name: "mailru",
|
||||
Description: "Mail.ru Cloud",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Required: true,
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name (usually email).",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: `Password.
|
||||
@@ -213,7 +214,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
|
||||
encoder.EncodeWin | // :?"*<>|
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -58,9 +58,10 @@ func init() {
|
||||
Description: "Mega",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Required: true,
|
||||
Name: "user",
|
||||
Help: "User name.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
|
||||
@@ -65,11 +65,13 @@ HTTP is provided primarily for debugging purposes.`,
|
||||
Help: `Domain+path of NetStorage host to connect to.
|
||||
|
||||
Format should be ` + "`<domain>/<internal folders>`",
|
||||
Required: true,
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "account",
|
||||
Help: "Set the NetStorage account name",
|
||||
Required: true,
|
||||
Name: "account",
|
||||
Help: "Set the NetStorage account name",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret",
|
||||
Help: `Set the NetStorage account secret/G2O key for authentication.
|
||||
|
||||
@@ -131,10 +131,11 @@ Note that the chunks will be buffered into memory.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "drive_id",
|
||||
Help: "The ID of the drive to use.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Name: "drive_id",
|
||||
Help: "The ID of the drive to use.",
|
||||
Default: "",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "drive_type",
|
||||
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
|
||||
@@ -148,7 +149,8 @@ This isn't normally needed, but in special circumstances you might
|
||||
know the folder ID that you wish to access but not be able to get
|
||||
there through a path traversal.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "access_scopes",
|
||||
Help: `Set scopes to be requested by rclone.
|
||||
@@ -260,7 +262,8 @@ this flag there.
|
||||
|
||||
At the time of writing this only works with OneDrive personal paid accounts.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "hash_type",
|
||||
Default: "auto",
|
||||
|
||||
@@ -42,9 +42,10 @@ func init() {
|
||||
Description: "OpenDrive",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "username",
|
||||
Help: "Username.",
|
||||
Required: true,
|
||||
Name: "username",
|
||||
Help: "Username.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Password.",
|
||||
|
||||
@@ -92,14 +92,16 @@ func newOptions() []fs.Option {
|
||||
Help: noAuthHelpText,
|
||||
}},
|
||||
}, {
|
||||
Name: "namespace",
|
||||
Help: "Object storage namespace",
|
||||
Required: true,
|
||||
Name: "namespace",
|
||||
Help: "Object storage namespace",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "compartment",
|
||||
Help: "Object storage compartment OCID",
|
||||
Provider: "!no_auth",
|
||||
Required: true,
|
||||
Name: "compartment",
|
||||
Help: "Object storage compartment OCID",
|
||||
Provider: "!no_auth",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Object storage Region",
|
||||
|
||||
@@ -110,10 +110,11 @@ func init() {
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}, {
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "d0",
|
||||
Advanced: true,
|
||||
Name: "root_folder_id",
|
||||
Help: "Fill in for rclone to use a non root folder as its starting point.",
|
||||
Default: "d0",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "hostname",
|
||||
Help: `Hostname to connect to.
|
||||
@@ -138,7 +139,8 @@ with rclone authorize.
|
||||
This is only required when you want to use the cleanup command. Due to a bug
|
||||
in the pcloud API the required API does not support OAuth authentication so
|
||||
we have to rely on user password authentication for it.`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Your pcloud password.",
|
||||
|
||||
@@ -158,9 +158,10 @@ func init() {
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
},
|
||||
Options: append(pikpakOAuthOptions(), []fs.Option{{
|
||||
Name: "user",
|
||||
Help: "Pikpak username.",
|
||||
Required: true,
|
||||
Name: "user",
|
||||
Help: "Pikpak username.",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Pikpak password.",
|
||||
@@ -173,7 +174,8 @@ Leave blank normally.
|
||||
|
||||
Fill in for rclone to use a non root folder as its starting point.
|
||||
`,
|
||||
Advanced: true,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "use_trash",
|
||||
Default: true,
|
||||
|
||||
@@ -82,14 +82,15 @@ func init() {
|
||||
OAuth2Config: oauthConfig,
|
||||
})
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "api_key",
|
||||
Help: `API Key.
|
||||
|
||||
This is not normally used - use oauth instead.
|
||||
`,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Default: "",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Default: "",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -99,7 +100,7 @@ This is not normally used - use oauth instead.
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeDoubleQuote |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -67,7 +67,7 @@ func init() {
|
||||
NoOffline: true,
|
||||
})
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
Advanced: true,
|
||||
@@ -77,7 +77,7 @@ func init() {
|
||||
Default: (encoder.Display |
|
||||
encoder.EncodeBackSlash |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -49,11 +49,13 @@ func init() {
|
||||
Help: "Get QingStor credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Name: "access_key_id",
|
||||
Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Name: "secret_access_key",
|
||||
Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",
|
||||
|
||||
@@ -66,7 +66,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Leviia, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
@@ -127,6 +127,9 @@ func init() {
|
||||
}, {
|
||||
Value: "LyveCloud",
|
||||
Help: "Seagate Lyve Cloud",
|
||||
}, {
|
||||
Value: "Leviia",
|
||||
Help: "Leviia Object Storage",
|
||||
}, {
|
||||
Value: "Liara",
|
||||
Help: "Liara Object Storage",
|
||||
@@ -154,6 +157,9 @@ func init() {
|
||||
}, {
|
||||
Value: "Storj",
|
||||
Help: "Storj (S3 Compatible Gateway)",
|
||||
}, {
|
||||
Value: "Synology",
|
||||
Help: "Synology C2 Object Storage",
|
||||
}, {
|
||||
Value: "TencentCOS",
|
||||
Help: "Tencent Cloud Object Storage (COS)",
|
||||
@@ -179,11 +185,13 @@ func init() {
|
||||
Help: "Get AWS credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Name: "access_key_id",
|
||||
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Name: "secret_access_key",
|
||||
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
// References:
|
||||
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||
@@ -463,10 +471,30 @@ func init() {
|
||||
Value: "sa-east-1",
|
||||
Help: "South America (São Paulo)",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your data stored.\n",
|
||||
Provider: "Synology",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "eu-001",
|
||||
Help: "Europe Region 1",
|
||||
}, {
|
||||
Value: "eu-002",
|
||||
Help: "Europe Region 2",
|
||||
}, {
|
||||
Value: "us-001",
|
||||
Help: "US Region 1",
|
||||
}, {
|
||||
Value: "us-002",
|
||||
Help: "US Region 2",
|
||||
}, {
|
||||
Value: "tw-001",
|
||||
Help: "Asia (Taiwan)",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -815,6 +843,15 @@ func init() {
|
||||
Value: "s3.sa-east-1.petabox.io",
|
||||
Help: "South America (São Paulo)",
|
||||
}},
|
||||
}, {
|
||||
// Leviia endpoints: https://www.leviia.com/object-storage/
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Leviia Object Storage API.",
|
||||
Provider: "Leviia",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3.leviia.com",
|
||||
Help: "The default endpoint\nLeviia",
|
||||
}},
|
||||
}, {
|
||||
// Liara endpoints: https://liara.ir/landing/object-storage
|
||||
Name: "endpoint",
|
||||
@@ -1000,6 +1037,26 @@ func init() {
|
||||
Value: "gateway.storjshare.io",
|
||||
Help: "Global Hosted Gateway",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Synology C2 Object Storage API.",
|
||||
Provider: "Synology",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "eu-001.s3.synologyc2.net",
|
||||
Help: "EU Endpoint 1",
|
||||
}, {
|
||||
Value: "eu-002.s3.synologyc2.net",
|
||||
Help: "EU Endpoint 2",
|
||||
}, {
|
||||
Value: "us-001.s3.synologyc2.net",
|
||||
Help: "US Endpoint 1",
|
||||
}, {
|
||||
Value: "us-002.s3.synologyc2.net",
|
||||
Help: "US Endpoint 2",
|
||||
}, {
|
||||
Value: "tw-001.s3.synologyc2.net",
|
||||
Help: "TW Endpoint 1",
|
||||
}},
|
||||
}, {
|
||||
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
|
||||
Name: "endpoint",
|
||||
@@ -1156,7 +1213,7 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox",
|
||||
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
@@ -1644,7 +1701,7 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -1659,7 +1716,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
If the acl is an empty string then no X-Amz-Acl: header is added and
|
||||
the default (private) will be used.
|
||||
`,
|
||||
Provider: "!Storj,Cloudflare",
|
||||
Provider: "!Storj,Synology,Cloudflare",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "default",
|
||||
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
|
||||
@@ -1775,6 +1832,7 @@ header is added and the default (private) will be used.
|
||||
Value: "arn:aws:kms:us-east-1:*",
|
||||
Help: "arn:aws:kms:*",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key",
|
||||
Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.
|
||||
@@ -1786,6 +1844,7 @@ Alternatively you can provide --sse-customer-key-base64.`,
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_base64",
|
||||
Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.
|
||||
@@ -1797,6 +1856,7 @@ Alternatively you can provide --sse-customer-key.`,
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "sse_customer_key_md5",
|
||||
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
|
||||
@@ -1809,6 +1869,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Value: "",
|
||||
Help: "None",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in S3.",
|
||||
@@ -2050,9 +2111,10 @@ If empty it will default to the environment variable "AWS_PROFILE" or
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "session_token",
|
||||
Help: "An AWS session token.",
|
||||
Advanced: true,
|
||||
Name: "session_token",
|
||||
Help: "An AWS session token.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
@@ -2969,11 +3031,15 @@ func setQuirks(opt *Options) {
|
||||
if opt.ChunkSize < 64*fs.Mebi {
|
||||
opt.ChunkSize = 64 * fs.Mebi
|
||||
}
|
||||
case "Synology":
|
||||
useMultipartEtag = false
|
||||
case "TencentCOS":
|
||||
listObjectsV2 = false // untested
|
||||
useMultipartEtag = false // untested
|
||||
case "Wasabi":
|
||||
// No quirks
|
||||
case "Leviia":
|
||||
// No quirks
|
||||
case "Qiniu":
|
||||
useMultipartEtag = false
|
||||
urlEncodeListings = false
|
||||
|
||||
@@ -67,15 +67,18 @@ func init() {
|
||||
Value: "https://cloud.seafile.com/",
|
||||
Help: "Connect to cloud.seafile.com.",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: configUser,
|
||||
Help: "User name (usually email address).",
|
||||
Required: true,
|
||||
Name: configUser,
|
||||
Help: "User name (usually email address).",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
// Password is not required, it will be left blank for 2FA
|
||||
Name: configPassword,
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config2FA,
|
||||
Help: "Two-factor authentication ('true' if the account has 2FA enabled).",
|
||||
@@ -87,6 +90,7 @@ func init() {
|
||||
Name: configLibraryKey,
|
||||
Help: "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
|
||||
IsPassword: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: configCreateLibrary,
|
||||
Help: "Should rclone create a library if it doesn't exist.",
|
||||
@@ -94,9 +98,10 @@ func init() {
|
||||
Default: false,
|
||||
}, {
|
||||
// Keep the authentication token after entering the 2FA code
|
||||
Name: configAuthToken,
|
||||
Help: "Authentication token.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Name: configAuthToken,
|
||||
Help: "Authentication token.",
|
||||
Hide: fs.OptionHideBoth,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
|
||||
@@ -59,13 +59,15 @@ func init() {
|
||||
Description: "SSH/SFTP",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
Name: "host",
|
||||
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SSH username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "SSH username.",
|
||||
Default: currentUser,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SSH port number.",
|
||||
@@ -75,8 +77,9 @@ func init() {
|
||||
Help: "SSH password, leave blank to use ssh-agent.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "key_pem",
|
||||
Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
|
||||
Name: "key_pem",
|
||||
Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "key_file",
|
||||
Help: "Path to PEM-encoded private key file.\n\nLeave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
|
||||
@@ -87,6 +90,7 @@ func init() {
|
||||
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
|
||||
in the new OpenSSH format can't be used.`,
|
||||
IsPassword: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pubkey_file",
|
||||
Help: `Optional path to public key file.
|
||||
|
||||
@@ -155,7 +155,7 @@ func init() {
|
||||
CheckAuth: checkAuth,
|
||||
})
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "upload_cutoff",
|
||||
Help: "Cutoff for switching to multipart upload.",
|
||||
Default: defaultUploadCutoff,
|
||||
@@ -182,6 +182,7 @@ standard values here or any folder ID (long hex number ID).`,
|
||||
Value: "top",
|
||||
Help: "Access the home, favorites, and shared folders as well as the connectors.",
|
||||
}},
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Default: defaultChunkSize,
|
||||
@@ -216,7 +217,7 @@ be set manually to something like: https://XXX.sharefile.com
|
||||
encoder.EncodeLeftSpace |
|
||||
encoder.EncodeLeftPeriod |
|
||||
encoder.EncodeInvalidUtf8),
|
||||
}},
|
||||
}}...),
|
||||
})
|
||||
}
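The ShareFile hunk above swaps its plain option slice for `append(oauthutil.SharedOptions, ...)`, so the common OAuth options (client id, secret, token, ...) are defined once and now carry `Sensitive`. Below is a minimal sketch of that registration pattern, using a hypothetical backend name and option and only the `fs`/`oauthutil` APIs visible in this diff; it is not any real backend.

```go
package example

import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/oauthutil"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "examplebackend", // hypothetical backend name
		Description: "Example OAuth backend",
		// SharedOptions contributes client_id, client_secret, token, etc.,
		// which this change marks Sensitive so `rclone config redacted`
		// hides their values.
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:      "example_token", // hypothetical backend-specific option
			Help:      "Backend specific token (illustrative).",
			Advanced:  true,
			Sensitive: true,
		}}...),
		// A real backend would also set NewFs here.
	})
}
```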
|
||||
|
||||
|
||||
@@ -45,7 +45,8 @@ func init() {
|
||||
|
||||
Note that siad must run with --disable-api-security to open API port for other hosts (not recommended).
|
||||
Keep default if Sia daemon runs on localhost.`,
|
||||
Default: "http://127.0.0.1:9980",
|
||||
Default: "http://127.0.0.1:9980",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "api_password",
|
||||
Help: `Sia Daemon API Password.
|
||||
|
||||
@@ -41,13 +41,15 @@ func init() {
|
||||
NewFs: NewFs,
|
||||
|
||||
Options: []fs.Option{{
|
||||
Name: "host",
|
||||
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
Name: "host",
|
||||
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
|
||||
Required: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "SMB username.",
|
||||
Default: currentUser,
|
||||
Name: "user",
|
||||
Help: "SMB username.",
|
||||
Default: currentUser,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "SMB port number.",
|
||||
@@ -57,9 +59,10 @@ func init() {
|
||||
Help: "SMB password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "domain",
|
||||
Help: "Domain name for NTLM authentication.",
|
||||
Default: "WORKGROUP",
|
||||
Name: "domain",
|
||||
Help: "Domain name for NTLM authentication.",
|
||||
Default: "WORKGROUP",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "spn",
|
||||
Help: `Service principal name.
|
||||
@@ -71,6 +74,7 @@ authentication, and it often needs to be set for clusters. For example:
|
||||
|
||||
Leave blank if not sure.
|
||||
`,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "idle_timeout",
|
||||
Default: fs.Duration(60 * time.Second),
|
||||
|
||||
@@ -98,9 +98,10 @@ func init() {
|
||||
},
|
||||
}},
|
||||
{
|
||||
Name: "access_grant",
|
||||
Help: "Access grant.",
|
||||
Provider: "existing",
|
||||
Name: "access_grant",
|
||||
Help: "Access grant.",
|
||||
Provider: "existing",
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "satellite_address",
|
||||
@@ -120,14 +121,16 @@ func init() {
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "api_key",
|
||||
Help: "API key.",
|
||||
Provider: newProvider,
|
||||
Name: "api_key",
|
||||
Help: "API key.",
|
||||
Provider: newProvider,
|
||||
Sensitive: true,
|
||||
},
|
||||
{
|
||||
Name: "passphrase",
|
||||
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
|
||||
Provider: newProvider,
|
||||
Name: "passphrase",
|
||||
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
|
||||
Provider: newProvider,
|
||||
Sensitive: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
@@ -132,42 +132,50 @@ func init() {
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
}, Options: []fs.Option{{
|
||||
Name: "app_id",
|
||||
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
|
||||
Name: "app_id",
|
||||
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
|
||||
Name: "access_key_id",
|
||||
Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "private_access_key",
|
||||
Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
|
||||
Name: "private_access_key",
|
||||
Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "hard_delete",
|
||||
Help: "Permanently delete files if true\notherwise put them in the deleted files.",
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "refresh_token",
|
||||
Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "refresh_token",
|
||||
Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "authorization",
|
||||
Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "authorization",
|
||||
Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "authorization_expiry",
|
||||
Help: "Sugarsync authorization expiry.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "user",
|
||||
Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "root_id",
|
||||
Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "root_id",
|
||||
Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "deleted_id",
|
||||
Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Name: "deleted_id",
|
||||
Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
|
||||
@@ -116,11 +116,13 @@ func init() {
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "User name to log in (OS_USERNAME).",
|
||||
Name: "user",
|
||||
Help: "User name to log in (OS_USERNAME).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "key",
|
||||
Help: "API key or password (OS_PASSWORD).",
|
||||
Name: "key",
|
||||
Help: "API key or password (OS_PASSWORD).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "auth",
|
||||
Help: "Authentication URL for server (OS_AUTH_URL).",
|
||||
@@ -147,20 +149,25 @@ func init() {
|
||||
Help: "Blomp Cloud Storage",
|
||||
}},
|
||||
}, {
|
||||
Name: "user_id",
|
||||
Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
|
||||
Name: "user_id",
|
||||
Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "domain",
|
||||
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
|
||||
Name: "domain",
|
||||
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "tenant",
|
||||
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
|
||||
Name: "tenant",
|
||||
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "tenant_id",
|
||||
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
|
||||
Name: "tenant_id",
|
||||
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "tenant_domain",
|
||||
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
|
||||
Name: "tenant_domain",
|
||||
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region name - optional (OS_REGION_NAME).",
|
||||
@@ -168,17 +175,21 @@ func init() {
|
||||
Name: "storage_url",
|
||||
Help: "Storage URL - optional (OS_STORAGE_URL).",
|
||||
}, {
|
||||
Name: "auth_token",
|
||||
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
|
||||
Name: "auth_token",
|
||||
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "application_credential_id",
|
||||
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
|
||||
Name: "application_credential_id",
|
||||
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "application_credential_name",
|
||||
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
|
||||
Name: "application_credential_name",
|
||||
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "application_credential_secret",
|
||||
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
|
||||
Name: "application_credential_secret",
|
||||
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "auth_version",
|
||||
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).",
|
||||
|
||||
@@ -43,8 +43,9 @@ func init() {
|
||||
Description: "Uptobox",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
|
||||
Name: "access_token",
|
||||
Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
|
||||
Name: "access_token",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Help: "Set to make uploaded files private",
|
||||
Name: "private",
|
||||
|
||||
@@ -96,15 +96,17 @@ func init() {
|
||||
Help: "Other site/service or software",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
|
||||
Name: "user",
|
||||
Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "Password.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "bearer_token",
|
||||
Help: "Bearer token instead of user/pass (e.g. a Macaroon).",
|
||||
Name: "bearer_token",
|
||||
Help: "Bearer token instead of user/pass (e.g. a Macaroon).",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "bearer_token_command",
|
||||
Help: "Command to run to get a bearer token.",
|
||||
|
||||
@@ -26,6 +26,7 @@ func init() {
|
||||
configCommand.AddCommand(configTouchCommand)
|
||||
configCommand.AddCommand(configPathsCommand)
|
||||
configCommand.AddCommand(configShowCommand)
|
||||
configCommand.AddCommand(configRedactedCommand)
|
||||
configCommand.AddCommand(configDumpCommand)
|
||||
configCommand.AddCommand(configProvidersCommand)
|
||||
configCommand.AddCommand(configCreateCommand)
|
||||
@@ -118,6 +119,35 @@ var configShowCommand = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var configRedactedCommand = &cobra.Command{
	Use:   "redacted [<remote>]",
	Short: `Print redacted (decrypted) config file, or the redacted config for a single remote.`,
	Long: `This prints a redacted copy of the config file, either the
whole config file or for a given remote.

The config file will be redacted by replacing all passwords and other
sensitive info with XXX.

This makes the config file suitable for posting online for support.

It should be double checked before posting as the redaction may not be perfect.

`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.64",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1, command, args)
		if len(args) == 0 {
			config.ShowRedactedConfig()
		} else {
			name := strings.TrimRight(args[0], ":")
			config.ShowRedactedRemote(name)
		}
		fmt.Println("### Double check the config for sensitive info before posting publicly")
	},
}
|
||||
|
||||
var configDumpCommand = &cobra.Command{
|
||||
Use: "dump",
|
||||
Short: `Dump the config file as JSON.`,
|
||||
|
||||
@@ -170,6 +170,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
|
||||
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
||||
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}}
|
||||
{{< provider name="Synology" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
|
||||
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
|
||||
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
|
||||
{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
|
||||
|
||||
@@ -738,3 +738,11 @@ put them back in again.` >}}
|
||||
* Peter Fern <github@0xc0dedbad.com>
|
||||
* zzq <i@zhangzqs.cn>
|
||||
* mac-15 <usman.ilamdin@phpstudios.com>
|
||||
* Sawada Tsunayoshi <34431649+TsunayoshiSawada@users.noreply.github.com>
|
||||
* Dean Attali <daattali@gmail.com>
|
||||
* Fjodor42 <molgaard@gmail.com>
|
||||
* BakaWang <wa11579@hotmail.com>
|
||||
* Mahad <56235065+Mahad-lab@users.noreply.github.com>
|
||||
* Vladislav Vorobev <x.miere@gmail.com>
|
||||
* darix <darix@users.noreply.github.com>
|
||||
* Benjamin <36415086+bbenjamin-sys@users.noreply.github.com>
|
||||
|
||||
@@ -174,7 +174,7 @@ These flags are available for every command.
|
||||
--use-json-log Use json log format
|
||||
--use-mmap Use mmap allocator (see docs)
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.63.1")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.63.0")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
```
|
||||
|
||||
|
||||
@@ -14,6 +14,8 @@ it also provides white-label solutions to different companies, such as:
|
||||
* Telia Sky (sky.telia.no)
|
||||
* Tele2
|
||||
* Tele2 Cloud (mittcloud.tele2.se)
|
||||
* Onlime
|
||||
* Onlime Cloud Storage (onlime.dk)
|
||||
* Elkjøp (with subsidiaries):
|
||||
* Elkjøp Cloud (cloud.elkjop.no)
|
||||
* Elgiganten Sweden (cloud.elgiganten.se)
|
||||
@@ -84,6 +86,18 @@ Tele2 Cloud customers as no support for creating a CLI token exists, and additio
|
||||
authentication flow where the username is generated internally. To setup rclone to use Tele2 Cloud,
|
||||
choose Tele2 Cloud authentication in the setup. The rest of the setup is identical to the default setup.
|
||||
|
||||
### Onlime Cloud Storage authentication
|
||||
|
||||
Onlime has sold access to Jottacloud proper, while providing localized support to Danish customers, but
has recently set up its own hosting, transferring its customers from Jottacloud's servers to its own.

This, of course, necessitates using Onlime's own servers for authentication, but otherwise the functionality and
architecture seem equivalent to Jottacloud.
|
||||
|
||||
To set up rclone to use Onlime Cloud Storage, choose Onlime Cloud authentication in the setup. The rest
|
||||
of the setup is identical to the default setup.
|
||||
|
||||
## Configuration
|
||||
|
||||
Here is an example of how to make a remote called `remote` with the default setup. First run:
|
||||
@@ -127,6 +141,9 @@ Press Enter for the default (standard).
|
||||
/ Tele2 Cloud authentication.
|
||||
4 | Use this if you are using Tele2 Cloud.
|
||||
\ (tele2)
|
||||
/ Onlime Cloud authentication.
|
||||
5 | Use this if you are using Onlime Cloud.
|
||||
\ (onlime)
|
||||
config_type> 1
|
||||
Personal login token.
|
||||
Generate here: https://www.jottacloud.com/web/secure
|
||||
|
||||
@@ -22,6 +22,7 @@ The S3 backend can be used with a number of different providers:
|
||||
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
|
||||
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}}
|
||||
{{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}}
|
||||
{{< provider name="Leviia Object Storage" home="https://www.leviia.com/object-storage/" config="/s3/#leviia" >}}
|
||||
{{< provider name="Liara Object Storage" home="https://liara.ir/landing/object-storage" config="/s3/#liara-cloud" >}}
|
||||
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
|
||||
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
|
||||
@@ -32,6 +33,7 @@ The S3 backend can be used with a number of different providers:
|
||||
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
|
||||
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
|
||||
{{< provider name="Storj" home="https://storj.io/" config="/s3/#storj" >}}
|
||||
{{< provider name="Synology C2 Object Storage" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
|
||||
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
|
||||
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
|
||||
{{< /provider_list >}}
|
||||
@@ -649,7 +651,7 @@ A simple solution is to set the `--s3-upload-cutoff 0` and force all the files t
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
|
||||
Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi).
|
||||
|
||||
#### --s3-provider
|
||||
|
||||
@@ -708,6 +710,8 @@ Properties:
|
||||
- StackPath Object Storage
|
||||
- "Storj"
|
||||
- Storj (S3 Compatible Gateway)
|
||||
- "Synology"
|
||||
- Synology C2 Object Storage
|
||||
- "TencentCOS"
|
||||
- Tencent Cloud Object Storage (COS)
|
||||
- "Wasabi"
|
||||
@@ -1069,7 +1073,7 @@ Properties:
|
||||
|
||||
- Config: region
|
||||
- Env Var: RCLONE_S3_REGION
|
||||
- Provider: !AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive
|
||||
- Provider: !AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
@@ -1565,6 +1569,29 @@ Properties:
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for Synology C2 Object Storage API.
|
||||
|
||||
Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_S3_ENDPOINT
|
||||
- Provider: Synology
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
- "eu-001.s3.synologyc2.net"
|
||||
- Europe Region 1
|
||||
- "eu-002.s3.synologyc2.net"
|
||||
- Europe Region 2
|
||||
- "us-001.s3.synologyc2.net"
|
||||
- US Region 1
|
||||
- "us-002.s3.synologyc2.net"
|
||||
- US Region 2
|
||||
- "tw-001.s3.synologyc2.net"
|
||||
- Asia Region (Taiwan)
|
||||
|
||||
#### --s3-endpoint
|
||||
|
||||
Endpoint for Tencent COS API.
|
||||
|
||||
Properties:
|
||||
@@ -1702,7 +1729,7 @@ Properties:
|
||||
|
||||
- Config: endpoint
|
||||
- Env Var: RCLONE_S3_ENDPOINT
|
||||
- Provider: !AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox
|
||||
- Provider: !AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox
|
||||
- Type: string
|
||||
- Required: false
|
||||
- Examples:
|
||||
@@ -2368,7 +2395,7 @@ Properties:
|
||||
|
||||
### Advanced options
|
||||
|
||||
Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi).
|
||||
Here are the Advanced options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi).
|
||||
|
||||
#### --s3-bucket-acl
|
||||
|
||||
@@ -3421,7 +3448,7 @@ Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
...
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
|
||||
\ (s3)
|
||||
...
|
||||
Storage> s3
|
||||
@@ -3605,7 +3632,7 @@ Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi
|
||||
5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> 5
|
||||
@@ -3900,7 +3927,7 @@ Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> s3
|
||||
@@ -4006,7 +4033,7 @@ Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> s3
|
||||
@@ -4252,7 +4279,7 @@ Choose a number from below, or type in your own value
|
||||
\ (alias)
|
||||
4 / Amazon Drive
|
||||
\ (amazon cloud drive)
|
||||
5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi
|
||||
5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> s3
|
||||
@@ -5132,7 +5159,129 @@ e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
### Leviia Cloud Object Storage {#leviia}
|
||||
|
||||
[Leviia Object Storage](https://www.leviia.com/object-storage/) lets you back up and secure your data in a 100% French cloud, independent of GAFAM.
|
||||
|
||||
To configure access to Leviia, follow the steps below:
|
||||
|
||||
1. Run `rclone config` and select `n` for a new remote.
|
||||
|
||||
```
|
||||
rclone config
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
```
|
||||
|
||||
2. Give the name of the configuration. For example, name it 'leviia'.
|
||||
|
||||
```
|
||||
name> leviia
|
||||
```
|
||||
|
||||
3. Select `s3` storage.
|
||||
|
||||
```
|
||||
Choose a number from below, or type in your own value
|
||||
1 / 1Fichier
|
||||
\ (fichier)
|
||||
2 / Akamai NetStorage
|
||||
\ (netstorage)
|
||||
3 / Alias for an existing remote
|
||||
\ (alias)
|
||||
4 / Amazon Drive
|
||||
\ (amazon cloud drive)
|
||||
5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> s3
|
||||
```
|
||||
|
||||
4. Select `Leviia` provider.
|
||||
```
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Amazon Web Services (AWS) S3
|
||||
\ "AWS"
|
||||
[snip]
|
||||
15 / Leviia Object Storage
|
||||
\ (Leviia)
|
||||
[snip]
|
||||
provider> Leviia
|
||||
```
|
||||
|
||||
5. Enter your Leviia Access Key ID and Secret Access Key.
|
||||
|
||||
```
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Enter AWS credentials in the next step
|
||||
\ "false"
|
||||
2 / Get AWS credentials from the environment (env vars or IAM)
|
||||
\ "true"
|
||||
env_auth> 1
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
access_key_id> ZnIx.xxxxxxxxxxxxxxx
|
||||
AWS Secret Access Key (password)
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
secret_access_key> xxxxxxxxxxx
|
||||
```
|
||||
|
||||
6. Select endpoint for Leviia.
|
||||
|
||||
```
|
||||
/ The default endpoint
|
||||
1 | Leviia.
|
||||
\ (s3.leviia.com)
|
||||
[snip]
|
||||
endpoint> 1
|
||||
|
||||
7. Choose acl.
|
||||
|
||||
```
|
||||
Note that this ACL is applied when server-side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
/ Owner gets FULL_CONTROL.
|
||||
1 | No one else has access rights (default).
|
||||
\ (private)
|
||||
/ Owner gets FULL_CONTROL.
|
||||
2 | The AllUsers group gets READ access.
|
||||
\ (public-read)
|
||||
[snip]
|
||||
acl> 1
|
||||
Edit advanced config? (y/n)
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
Remote config
|
||||
--------------------
|
||||
[leviia]
|
||||
- type: s3
|
||||
- provider: Leviia
|
||||
- access_key_id: ZnIx.xxxxxxx
|
||||
- secret_access_key: xxxxxxxx
|
||||
- endpoint: s3.leviia.com
|
||||
- acl: private
|
||||
--------------------
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
Current remotes:
|
||||
|
||||
Name Type
|
||||
==== ====
|
||||
leviia s3
|
||||
```
|
||||
### Liara {#liara-cloud}
|
||||
|
||||
Here is an example of making a [Liara Object Storage](https://liara.ir/landing/object-storage)
|
||||
@@ -5749,3 +5898,137 @@ use policy `mfs` (most free space) as a member of an rclone union
|
||||
remote.
|
||||
|
||||
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
|
||||
|
||||
|
||||
|
||||
### Synology C2 Object Storage {#synology-c2}
|
||||
|
||||
[Synology C2 Object Storage](https://c2.synology.com/en-global/object-storage/overview) provides a secure, S3-compatible, and cost-effective cloud storage solution with no API request fees, download fees, or deletion penalties.
|
||||
|
||||
The S3 compatible gateway is configured using `rclone config` with a
|
||||
type of `s3` and with a provider name of `Synology`. Here is an example
|
||||
run of the configurator.
|
||||
|
||||
First run:
|
||||
|
||||
```
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process.
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
|
||||
n/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> syno
|
||||
|
||||
Type of storage to configure.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
|
||||
5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, GCS, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
|
||||
\ "s3"
|
||||
|
||||
Storage> s3
|
||||
|
||||
Choose your S3 provider.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
Choose a number from below, or type in your own value
|
||||
24 / Synology C2 Object Storage
|
||||
\ (Synology)
|
||||
|
||||
provider> Synology
|
||||
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Enter a boolean value (true or false). Press Enter for the default ("false").
|
||||
Choose a number from below, or type in your own value
|
||||
1 / Enter AWS credentials in the next step
|
||||
\ "false"
|
||||
2 / Get AWS credentials from the environment (env vars or IAM)
|
||||
\ "true"
|
||||
|
||||
env_auth> 1
|
||||
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
|
||||
access_key_id> accesskeyid
|
||||
|
||||
AWS Secret Access Key (password)
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a string value. Press Enter for the default ("").
|
||||
|
||||
secret_access_key> secretaccesskey
|
||||
|
||||
Region where your data stored.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / Europe Region 1
|
||||
\ (eu-001)
|
||||
2 / Europe Region 2
|
||||
\ (eu-002)
|
||||
3 / US Region 1
|
||||
\ (us-001)
|
||||
4 / US Region 2
|
||||
\ (us-002)
|
||||
5 / Asia (Taiwan)
|
||||
\ (tw-001)
|
||||
|
||||
region> 1
|
||||
|
||||
Option endpoint.
|
||||
Endpoint for Synology C2 Object Storage API.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / EU Endpoint 1
|
||||
\ (eu-001.s3.synologyc2.net)
|
||||
2 / US Endpoint 1
|
||||
\ (us-001.s3.synologyc2.net)
|
||||
3 / TW Endpoint 1
|
||||
\ (tw-001.s3.synologyc2.net)
|
||||
|
||||
endpoint> 1
|
||||
|
||||
Option location_constraint.
|
||||
Location constraint - must be set to match the Region.
|
||||
Leave blank if not sure. Used when creating buckets only.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
location_constraint>
|
||||
|
||||
Edit advanced config? (y/n)
|
||||
y) Yes
|
||||
n) No
|
||||
y/n> y
|
||||
|
||||
Option no_check_bucket.
|
||||
If set, don't attempt to check the bucket exists or create it.
|
||||
This can be useful when trying to minimise the number of transactions
|
||||
rclone does if you know the bucket exists already.
|
||||
It can also be needed if the user you are using does not have bucket
|
||||
creation permissions. Before v1.52.0 this would have passed silently
|
||||
due to a bug.
|
||||
Enter a boolean value (true or false). Press Enter for the default (true).
|
||||
|
||||
no_check_bucket> true
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: s3
|
||||
- provider: Synology
|
||||
- region: eu-001
|
||||
- endpoint: eu-001.s3.synologyc2.net
|
||||
- no_check_bucket: true
|
||||
Keep this "syno" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
|
||||
y/e/d> y
|
||||
|
||||
@@ -1 +1 @@
|
||||
v1.63.1
|
||||
v1.64.0
|
||||
@@ -302,19 +302,41 @@ func mustFindByName(name string) *fs.RegInfo {
|
||||
return fs.MustFind(fsType)
|
||||
}
|
||||
|
||||
// findByName finds the RegInfo for the remote name passed in or
|
||||
// returns an error
|
||||
func findByName(name string) (*fs.RegInfo, error) {
|
||||
fsType := FileGet(name, "type")
|
||||
if fsType == "" {
|
||||
return nil, fmt.Errorf("couldn't find type of fs for %q", name)
|
||||
}
|
||||
return fs.Find(fsType)
|
||||
}
|
||||
|
||||
// printRemoteOptions prints the options of the remote
|
||||
func printRemoteOptions(name string, prefix string, sep string) {
|
||||
fs := mustFindByName(name)
|
||||
func printRemoteOptions(name string, prefix string, sep string, redacted bool) {
|
||||
fsInfo, err := findByName(name)
|
||||
if err != nil {
|
||||
fmt.Printf("# %v\n", err)
|
||||
fsInfo = nil
|
||||
}
|
||||
for _, key := range LoadedData().GetKeyList(name) {
|
||||
isPassword := false
|
||||
for _, option := range fs.Options {
|
||||
if option.Name == key && option.IsPassword {
|
||||
isPassword = true
|
||||
break
|
||||
isSensitive := false
|
||||
if fsInfo != nil {
|
||||
for _, option := range fsInfo.Options {
|
||||
if option.Name == key {
|
||||
if option.IsPassword {
|
||||
isPassword = true
|
||||
} else if option.Sensitive {
|
||||
isSensitive = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
value := FileGet(name, key)
|
||||
if isPassword && value != "" {
|
||||
if redacted && (isSensitive || isPassword) && value != "" {
|
||||
fmt.Printf("%s%s%sXXX\n", prefix, key, sep)
|
||||
} else if isPassword && value != "" {
|
||||
fmt.Printf("%s%s%s*** ENCRYPTED ***\n", prefix, key, sep)
|
||||
} else {
|
||||
fmt.Printf("%s%s%s%s\n", prefix, key, sep, value)
|
||||
@@ -324,13 +346,19 @@ func printRemoteOptions(name string, prefix string, sep string) {
|
||||
|
||||
// listRemoteOptions lists the options of the remote
|
||||
func listRemoteOptions(name string) {
|
||||
printRemoteOptions(name, "- ", ": ")
|
||||
printRemoteOptions(name, "- ", ": ", false)
|
||||
}
|
||||
|
||||
// ShowRemote shows the contents of the remote in config file format
|
||||
func ShowRemote(name string) {
|
||||
fmt.Printf("[%s]\n", name)
|
||||
printRemoteOptions(name, "", " = ")
|
||||
printRemoteOptions(name, "", " = ", false)
|
||||
}
|
||||
|
||||
// ShowRedactedRemote shows the contents of the remote in config file format
|
||||
func ShowRedactedRemote(name string) {
|
||||
fmt.Printf("[%s]\n", name)
|
||||
printRemoteOptions(name, "", " = ", true)
|
||||
}
|
||||
|
||||
// OkRemote prints the contents of the remote and ask if it is OK
|
||||
@@ -634,6 +662,22 @@ func ShowConfig() {
|
||||
fmt.Printf("%s", str)
|
||||
}
|
||||
|
||||
// ShowRedactedConfig prints the redacted (unencrypted) config options
|
||||
func ShowRedactedConfig() {
|
||||
remotes := LoadedData().GetSectionList()
|
||||
if len(remotes) == 0 {
|
||||
fmt.Println("; empty config")
|
||||
return
|
||||
}
|
||||
sort.Strings(remotes)
|
||||
for i, remote := range remotes {
|
||||
if i != 0 {
|
||||
fmt.Println()
|
||||
}
|
||||
ShowRedactedRemote(remote)
|
||||
}
|
||||
}
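The decision made by `printRemoteOptions` above can be summarised in a few lines. This is a simplified standalone sketch of the same rule, not the rclone function itself:

```go
package example

// redactValue mirrors the branch added to printRemoteOptions: in redacted
// mode both sensitive and password values become XXX, otherwise obscured
// passwords are still masked and everything else is printed as-is.
func redactValue(value string, isPassword, isSensitive, redacted bool) string {
	switch {
	case value == "":
		return value
	case redacted && (isSensitive || isPassword):
		return "XXX"
	case isPassword:
		return "*** ENCRYPTED ***"
	default:
		return value
	}
}
```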
|
||||
|
||||
// EditConfig edits the config file interactively
|
||||
func EditConfig(ctx context.Context) (err error) {
|
||||
for {
|
||||
|
||||
@@ -243,6 +243,7 @@ func TestOptionMarshalJSON(t *testing.T) {
|
||||
"NoPrefix": false,
|
||||
"Advanced": true,
|
||||
"Exclusive": false,
|
||||
"Sensitive": false,
|
||||
"DefaultStr": "false",
|
||||
"ValueStr": "true",
|
||||
"Type": "bool"
|
||||
|
||||
@@ -826,17 +826,19 @@ func Same(fdst, fsrc fs.Info) bool {
|
||||
return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
|
||||
}
|
||||
|
||||
// fixRoot returns the Root with a trailing / if not empty. It is
|
||||
// aware of case insensitive filesystems.
|
||||
func fixRoot(f fs.Info) string {
|
||||
s := strings.Trim(filepath.ToSlash(f.Root()), "/")
|
||||
// fixRoot returns the Root with a trailing / if not empty.
|
||||
//
|
||||
// It returns a case folded version for case insensitive file systems
|
||||
func fixRoot(f fs.Info) (s string, folded string) {
|
||||
s = strings.Trim(filepath.ToSlash(f.Root()), "/")
|
||||
if s != "" {
|
||||
s += "/"
|
||||
}
|
||||
folded = s
|
||||
if f.Features().CaseInsensitive {
|
||||
s = strings.ToLower(s)
|
||||
folded = strings.ToLower(s)
|
||||
}
|
||||
return s
|
||||
return s, folded
|
||||
}
|
||||
|
||||
// OverlappingFilterCheck returns true if fdst and fsrc point to the same
|
||||
@@ -845,37 +847,28 @@ func OverlappingFilterCheck(ctx context.Context, fdst fs.Fs, fsrc fs.Fs) bool {
|
||||
if !SameConfig(fdst, fsrc) {
|
||||
return false
|
||||
}
|
||||
fdstRoot := fixRoot(fdst)
|
||||
fsrcRoot := fixRoot(fsrc)
|
||||
if strings.HasPrefix(fdstRoot, fsrcRoot) {
|
||||
fdstRoot, fdstRootFolded := fixRoot(fdst)
|
||||
fsrcRoot, fsrcRootFolded := fixRoot(fsrc)
|
||||
if fdstRootFolded == fsrcRootFolded {
|
||||
return true
|
||||
} else if strings.HasPrefix(fdstRootFolded, fsrcRootFolded) {
|
||||
fdstRelative := fdstRoot[len(fsrcRoot):]
|
||||
return filterCheckR(ctx, fdstRelative, 0, fsrc)
|
||||
return filterCheck(ctx, fsrc, fdstRelative)
|
||||
} else if strings.HasPrefix(fsrcRootFolded, fdstRootFolded) {
|
||||
fsrcRelative := fsrcRoot[len(fdstRoot):]
|
||||
return filterCheck(ctx, fdst, fsrcRelative)
|
||||
}
|
||||
return strings.HasPrefix(fsrcRoot, fdstRoot)
|
||||
return false
|
||||
}
|
||||
|
||||
// filterCheckR checks if fdst would be included in the sync
|
||||
func filterCheckR(ctx context.Context, fdstRelative string, pos int, fsrc fs.Fs) bool {
|
||||
include := true
|
||||
// filterCheck checks if dir is included in f
|
||||
func filterCheck(ctx context.Context, f fs.Fs, dir string) bool {
|
||||
fi := filter.GetConfig(ctx)
|
||||
includeDirectory := fi.IncludeDirectory(ctx, fsrc)
|
||||
dirs := strings.SplitAfterN(fdstRelative, "/", pos+2)
|
||||
newPath := ""
|
||||
for i := 0; i <= pos; i++ {
|
||||
newPath += dirs[i]
|
||||
}
|
||||
if !strings.HasSuffix(newPath, "/") {
|
||||
newPath += "/"
|
||||
}
|
||||
if strings.HasPrefix(fdstRelative, newPath) {
|
||||
include, _ = includeDirectory(newPath)
|
||||
if include {
|
||||
if newPath == fdstRelative {
|
||||
return true
|
||||
}
|
||||
pos++
|
||||
include = filterCheckR(ctx, fdstRelative, pos, fsrc)
|
||||
}
|
||||
includeDirectory := fi.IncludeDirectory(ctx, f)
|
||||
include, err := includeDirectory(dir)
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Failed to discover whether directory is included: %v", err)
|
||||
return true
|
||||
}
|
||||
return include
|
||||
}
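A condensed sketch of what the rewritten check does: compare the (already slash-terminated) roots in case-folded form when the remote is case insensitive, and hand only the relative sub-path on the longer side to the directory filter. This is illustrative, not the actual `OverlappingFilterCheck`:

```go
package example

import "strings"

// overlaps assumes both roots are normalised with a trailing "/" as fixRoot
// does, and that "included" wraps the filter's IncludeDirectory check.
func overlaps(dstRoot, srcRoot string, caseInsensitive bool, included func(dir string) bool) bool {
	fold := func(s string) string {
		if caseInsensitive {
			return strings.ToLower(s)
		}
		return s
	}
	dstFolded, srcFolded := fold(dstRoot), fold(srcRoot)
	switch {
	case dstFolded == srcFolded:
		return true // identical roots always overlap
	case strings.HasPrefix(dstFolded, srcFolded):
		return included(dstRoot[len(srcRoot):]) // dst is inside src
	case strings.HasPrefix(srcFolded, dstFolded):
		return included(srcRoot[len(dstRoot):]) // src is inside dst
	}
	return false
}
```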
|
||||
@@ -886,9 +879,9 @@ func SameDir(fdst, fsrc fs.Info) bool {
|
||||
if !SameConfig(fdst, fsrc) {
|
||||
return false
|
||||
}
|
||||
fdstRoot := fixRoot(fdst)
|
||||
fsrcRoot := fixRoot(fsrc)
|
||||
return fdstRoot == fsrcRoot
|
||||
_, fdstRootFolded := fixRoot(fdst)
|
||||
_, fsrcRootFolded := fixRoot(fsrc)
|
||||
return fdstRootFolded == fsrcRootFolded
|
||||
}
|
||||
|
||||
// Retry runs fn up to maxTries times if it returns a retriable error
|
||||
|
||||
@@ -1418,11 +1418,13 @@ func TestOverlappingFilterCheckWithFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
fi, err := filter.NewFilter(nil)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fi.Add(false, "*/exclude/"))
|
||||
fi.Opt.ExcludeFile = []string{".ignore"}
|
||||
require.NoError(t, fi.Add(false, "/exclude/"))
|
||||
require.NoError(t, fi.Add(false, "/Exclude2/"))
|
||||
require.NoError(t, fi.Add(true, "*"))
|
||||
ctx = filter.ReplaceConfig(ctx, fi)
|
||||
|
||||
src := &testFs{testFsInfo{name: "name", root: "root"}}
|
||||
src.features.CaseInsensitive = true
|
||||
slash := string(os.PathSeparator) // native path separator
|
||||
for _, test := range []struct {
|
||||
name string
|
||||
@@ -1430,25 +1432,32 @@ func TestOverlappingFilterCheckWithFilter(t *testing.T) {
|
||||
expected bool
|
||||
}{
|
||||
{"name", "root", true},
|
||||
{"name", "ROOT", true}, // case insensitive is set
|
||||
{"name", "/root", true},
|
||||
{"name", "root/", true},
|
||||
{"name", "root" + slash, true},
|
||||
{"name", "root/exclude", false},
|
||||
{"name", "root/Exclude2", false},
|
||||
{"name", "root/include", true},
|
||||
{"name", "root/exclude/", false},
|
||||
{"name", "root/Exclude2/", false},
|
||||
{"name", "root/exclude/sub", false},
|
||||
{"name", "root/Exclude2/sub", false},
|
||||
{"name", "/root/exclude/", false},
|
||||
{"name", "root" + slash + "exclude", false},
|
||||
{"name", "root" + slash + "exclude" + slash, false},
|
||||
{"name", "root/.ignore", false},
|
||||
{"name", "root" + slash + ".ignore", false},
|
||||
{"namey", "root/include", false},
|
||||
{"namey", "root/include/", false},
|
||||
{"namey", "root" + slash + "include", false},
|
||||
{"namey", "root" + slash + "include" + slash, false},
|
||||
} {
|
||||
dst := &testFs{testFsInfo{name: test.name, root: test.root}}
|
||||
dst.features.CaseInsensitive = true
|
||||
what := fmt.Sprintf("(%q,%q) vs (%q,%q)", src.name, src.root, dst.name, dst.root)
|
||||
actual := operations.OverlappingFilterCheck(ctx, dst, src)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
actual = operations.OverlappingFilterCheck(ctx, src, dst)
|
||||
assert.Equal(t, test.expected, actual, what)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -389,10 +389,7 @@ func rcSetSoftMemoryLimit(ctx context.Context, in Params) (out Params, err error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
oldMemLimit, err := debug.SetMemoryLimit(memLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
oldMemLimit := debug.SetMemoryLimit(memLimit)
|
||||
out = Params{
|
||||
"existing-mem-limit": oldMemLimit,
|
||||
}
|
||||
|
||||
@@ -154,6 +154,7 @@ type Option struct {
|
||||
NoPrefix bool // set if the option for this should not use the backend prefix
|
||||
Advanced bool // set if this is an advanced config option
|
||||
Exclusive bool // set if the answer can only be one of the examples (empty string allowed unless Required or Default is set)
|
||||
Sensitive bool // set if this option should be redacted when using rclone config redacted
|
||||
}
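The new `Sensitive` field complements the existing `IsPassword` flag: `IsPassword` values are stored obscured and always printed masked, while `Sensitive` values stay readable in the config file but are replaced with XXX by the new `rclone config redacted` command. A small illustrative declaration (the option names are made up):

```go
package example

import "github.com/rclone/rclone/fs"

var exampleOptions = []fs.Option{{
	Name:       "pass",
	Help:       "Password.",
	IsPassword: true, // stored obscured; printed as *** ENCRYPTED ***
}, {
	Name:      "api_key",
	Help:      "API key.",
	Sensitive: true, // stored in clear; redacted to XXX by `rclone config redacted`
}}
```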
|
||||
|
||||
// BaseOption is an alias for Option used internally
|
||||
|
||||
@@ -1423,7 +1423,7 @@ func TestSyncOverlapWithFilter(t *testing.T) {
|
||||
require.NoError(t, fi.Add(false, "/rclone-sync-test/"))
|
||||
require.NoError(t, fi.Add(false, "*/layer2/"))
|
||||
fi.Opt.ExcludeFile = []string{".ignore"}
|
||||
ctx = filter.ReplaceConfig(ctx, fi)
|
||||
filterCtx := filter.ReplaceConfig(ctx, fi)
|
||||
|
||||
subRemoteName := r.FremoteName + "/rclone-sync-test"
|
||||
FremoteSync, err := fs.NewFs(ctx, subRemoteName)
|
||||
@@ -1453,19 +1453,27 @@ func TestSyncOverlapWithFilter(t *testing.T) {
|
||||
}
|
||||
|
||||
accounting.GlobalStats().ResetCounters()
|
||||
checkNoErr(Sync(ctx, FremoteSync, r.Fremote, false))
|
||||
checkNoErr(Sync(filterCtx, FremoteSync, r.Fremote, false))
|
||||
checkErr(Sync(ctx, FremoteSync, r.Fremote, false))
|
||||
checkNoErr(Sync(filterCtx, r.Fremote, FremoteSync, false))
|
||||
checkErr(Sync(ctx, r.Fremote, FremoteSync, false))
|
||||
checkErr(Sync(filterCtx, r.Fremote, r.Fremote, false))
|
||||
checkErr(Sync(ctx, r.Fremote, r.Fremote, false))
|
||||
checkErr(Sync(filterCtx, FremoteSync, FremoteSync, false))
|
||||
checkErr(Sync(ctx, FremoteSync, FremoteSync, false))
|
||||
|
||||
checkNoErr(Sync(ctx, FremoteSync2, r.Fremote, false))
|
||||
checkNoErr(Sync(filterCtx, FremoteSync2, r.Fremote, false))
|
||||
checkErr(Sync(ctx, FremoteSync2, r.Fremote, false))
|
||||
checkNoErr(Sync(filterCtx, r.Fremote, FremoteSync2, false))
|
||||
checkErr(Sync(ctx, r.Fremote, FremoteSync2, false))
|
||||
checkErr(Sync(ctx, r.Fremote, r.Fremote, false))
|
||||
checkErr(Sync(filterCtx, FremoteSync2, FremoteSync2, false))
|
||||
checkErr(Sync(ctx, FremoteSync2, FremoteSync2, false))
|
||||
|
||||
checkNoErr(Sync(ctx, FremoteSync3, r.Fremote, false))
|
||||
checkNoErr(Sync(filterCtx, FremoteSync3, r.Fremote, false))
|
||||
checkErr(Sync(ctx, FremoteSync3, r.Fremote, false))
|
||||
checkNoErr(Sync(filterCtx, r.Fremote, FremoteSync3, false))
|
||||
checkErr(Sync(ctx, r.Fremote, FremoteSync3, false))
|
||||
checkErr(Sync(ctx, r.Fremote, r.Fremote, false))
|
||||
checkErr(Sync(filterCtx, FremoteSync3, FremoteSync3, false))
|
||||
checkErr(Sync(ctx, FremoteSync3, FremoteSync3, false))
|
||||
}
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
//go:build !go1.18
|
||||
// +build !go1.18
|
||||
//go:build !go1.19
|
||||
// +build !go1.19
|
||||
|
||||
package fs
|
||||
|
||||
// Upgrade to Go version 1.18 to compile rclone - latest stable go
|
||||
// Upgrade to Go version 1.19 to compile rclone - latest stable go
|
||||
// compiler recommended.
|
||||
func init() { Go_version_1_18_required_for_compilation() }
|
||||
func init() { Go_version_1_19_required_for_compilation() }
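For context, the trick in this file is that `Go_version_1_19_required_for_compilation` is never defined anywhere, so a pre-1.19 toolchain (the only one matched by the build constraint) fails with an `undefined` error whose name spells out the requirement. The same file again, with that intent spelled out in comments added here for illustration only:

```go
//go:build !go1.19
// +build !go1.19

package fs

// Upgrade to Go version 1.19 to compile rclone - latest stable go
// compiler recommended.
//
// The function below is intentionally left undefined: this file is only
// compiled on Go < 1.19, where the build then fails with
// "undefined: Go_version_1_19_required_for_compilation".
func init() { Go_version_1_19_required_for_compilation() }
```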
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package fs
|
||||
|
||||
// VersionTag of rclone
|
||||
var VersionTag = "v1.63.1"
|
||||
var VersionTag = "v1.64.0"
|
||||
|
||||
@@ -73,7 +73,7 @@ func main() {
|
||||
configfile.Install()
|
||||
|
||||
// Seed the random number generator
|
||||
rand.Seed(time.Now().UTC().UnixNano())
|
||||
randInstance := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
|
||||
|
||||
// Filter selection
|
||||
if *testRemotes != "" {
|
||||
@@ -103,7 +103,7 @@ func main() {
|
||||
|
||||
// Runs we will do for this test in random order
|
||||
runs := conf.MakeRuns()
|
||||
rand.Shuffle(len(runs), runs.Swap)
|
||||
randInstance.Shuffle(len(runs), runs.Swap)
|
||||
|
||||
// Create Report
|
||||
report := NewReport()
|
||||
|
||||
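`math/rand.Seed` is deprecated from Go 1.20, which is why the test driver above now keeps its own generator instead of seeding the package-level one. A minimal sketch of the pattern (the run list and swap closure are placeholders):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Private generator instead of seeding the deprecated global one.
	randInstance := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))

	runs := []string{"s3", "sftp", "smb", "swift"}
	randInstance.Shuffle(len(runs), func(i, j int) {
		runs[i], runs[j] = runs[j], runs[i]
	})
	fmt.Println(runs)
}
```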
2
go.mod
2
go.mod
@@ -1,6 +1,6 @@
|
||||
module github.com/rclone/rclone
|
||||
|
||||
go 1.18
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
bazil.org/fuse v0.0.0-20221209211307-2abb8038c751
|
||||
|
||||
@@ -10,3 +10,9 @@ import (
|
||||
func SetGCPercent(percent int) int {
|
||||
return debug.SetGCPercent(percent)
|
||||
}
|
||||
|
||||
// SetMemoryLimit calls the runtime/debug.SetMemoryLimit function to set the
|
||||
// soft-memory limit.
|
||||
func SetMemoryLimit(limit int64) int64 {
|
||||
return debug.SetMemoryLimit(limit)
|
||||
}
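With Go 1.19 now the minimum, `runtime/debug.SetMemoryLimit` is always available, so the wrapper can drop its error return (and the `rc` handler earlier in this diff no longer has an error to propagate). A standard-library-only usage sketch:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// A negative value reports the current soft limit without changing it.
	current := debug.SetMemoryLimit(-1)
	fmt.Println("current soft memory limit:", current)

	// Set a 1 GiB soft limit; the previous limit is returned.
	previous := debug.SetMemoryLimit(1 << 30)
	fmt.Println("previous soft memory limit:", previous)
}
```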
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
//go:build go1.19
|
||||
// +build go1.19
|
||||
|
||||
package debug
|
||||
|
||||
import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
// SetMemoryLimit calls the runtime/debug.SetMemoryLimit function to set the
|
||||
// soft-memory limit.
|
||||
func SetMemoryLimit(limit int64) (int64, error) {
|
||||
return debug.SetMemoryLimit(limit), nil
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
//go:build !go1.19
|
||||
// +build !go1.19
|
||||
|
||||
package debug
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// SetMemoryLimit is a no-op on Go version < 1.19.
|
||||
func SetMemoryLimit(limit int64) (int64, error) {
|
||||
return limit, fmt.Errorf("not implemented on Go version below 1.19: %s", runtime.Version())
|
||||
}
|
||||
@@ -82,15 +82,18 @@ All done. Please go back to rclone.
|
||||
|
||||
// SharedOptions are shared between backends the utilize an OAuth flow
|
||||
var SharedOptions = []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "OAuth Client Id.\n\nLeave blank normally.",
|
||||
Name: config.ConfigClientID,
|
||||
Help: "OAuth Client Id.\n\nLeave blank normally.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "OAuth Client Secret.\n\nLeave blank normally.",
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "OAuth Client Secret.\n\nLeave blank normally.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigToken,
|
||||
Help: "OAuth Access Token as a JSON blob.",
|
||||
Advanced: true,
|
||||
Name: config.ConfigToken,
|
||||
Help: "OAuth Access Token as a JSON blob.",
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigAuthURL,
|
||||
Help: "Auth server URL.\n\nLeave blank to use the provider defaults.",
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -163,7 +162,7 @@ const drainLimit = 10 * 1024 * 1024
|
||||
// drainAndClose discards up to drainLimit bytes from r and closes
|
||||
// it. Any errors from the Read or Close are returned.
|
||||
func drainAndClose(r io.ReadCloser) (err error) {
|
||||
_, readErr := io.CopyN(ioutil.Discard, r, drainLimit)
|
||||
_, readErr := io.CopyN(io.Discard, r, drainLimit)
|
||||
if readErr == io.EOF {
|
||||
readErr = nil
|
||||
}
|
||||
|
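The hunk above only swaps `ioutil.Discard` for `io.Discard`; for readers unfamiliar with the helper, here is a self-contained sketch of the drain-and-close idea it implements (read and discard a bounded amount of the body so the HTTP connection can be reused, then close it). This is an approximation, not the exact rclone function:

```go
package example

import (
	"errors"
	"io"
)

const drainLimit = 10 * 1024 * 1024

// drainAndClose discards up to drainLimit bytes from r and closes it,
// returning the read error (if any) in preference to the close error.
func drainAndClose(r io.ReadCloser) error {
	_, readErr := io.CopyN(io.Discard, r, drainLimit)
	if errors.Is(readErr, io.EOF) {
		readErr = nil
	}
	closeErr := r.Close()
	if readErr != nil {
		return readErr
	}
	return closeErr
}
```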
||||
126
rclone.1
generated
126
rclone.1
generated
@@ -1,7 +1,7 @@
|
||||
.\"t
|
||||
.\" Automatically generated by Pandoc 2.9.2.1
|
||||
.\"
|
||||
.TH "rclone" "1" "Jul 17, 2023" "User Manual" ""
|
||||
.TH "rclone" "1" "Jun 30, 2023" "User Manual" ""
|
||||
.hy
|
||||
.SH Rclone syncs your files to cloud storage
|
||||
.PP
|
||||
@@ -24,7 +24,7 @@ Donate. (https://rclone.org/donate/)
|
||||
Rclone is a command-line program to manage files on cloud storage.
|
||||
It is a feature-rich alternative to cloud vendors\[aq] web storage
|
||||
interfaces.
|
||||
Over 70 cloud storage products support rclone including S3 object
|
||||
Over 40 cloud storage products support rclone including S3 object
|
||||
stores, business & consumer file storage services, as well as standard
|
||||
transfer protocols.
|
||||
.PP
|
||||
@@ -3456,42 +3456,6 @@ not listed here.
|
||||
.IP \[bu] 2
|
||||
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
|
||||
interactive configuration session.
|
||||
.SH rclone config redacted
|
||||
.PP
|
||||
Print redacted (decrypted) config file, or the redacted config for a
|
||||
single remote.
|
||||
.SH Synopsis
|
||||
.PP
|
||||
This prints a redacted copy of the config file, either the whole config
|
||||
file or for a given remote.
|
||||
.PP
|
||||
The config file will be redacted by replacing all passwords and other
|
||||
sensitive info with XXX.
|
||||
.PP
|
||||
This makes the config file suitable for posting online for support.
|
||||
.PP
|
||||
It should be double checked before posting as the redaction may not be
|
||||
perfect.
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
rclone config redacted [<remote>] [flags]
|
||||
\f[R]
|
||||
.fi
|
||||
.SH Options
|
||||
.IP
|
||||
.nf
|
||||
\f[C]
|
||||
-h, --help help for redacted
|
||||
\f[R]
|
||||
.fi
|
||||
.PP
|
||||
See the global flags page (https://rclone.org/flags/) for global options
|
||||
not listed here.
|
||||
.SH SEE ALSO
|
||||
.IP \[bu] 2
|
||||
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
|
||||
interactive configuration session.
|
||||
.SH rclone config show
|
||||
.PP
|
||||
Print (decrypted) config file, or the config for a single remote.
|
||||
@@ -14863,7 +14827,7 @@ directory \f[C]dir\f[R] and sub directories.
E.g.
on Microsoft Windows
\f[C]rclone ls remote: --exclude \[dq]*\[rs][{JP,KR,HK}\[rs]]*\[dq]\f[R]
lists the files in \f[C]remote:\f[R] without \f[C][JP]\f[R] or
lists the files in \f[C]remote:\f[R] with \f[C][JP]\f[R] or
\f[C][KR]\f[R] or \f[C][HK]\f[R] in their name.
Quotes prevent the shell from interpreting the \f[C]\[rs]\f[R]
characters.\f[C]\[rs]\f[R] characters escape the \f[C][\f[R] and
@@ -20848,7 +20812,7 @@ These flags are available for every command.
      --use-json-log            Use json log format
      --use-mmap                Use mmap allocator (see docs)
      --use-server-modtime      Use server modified time instead of object metadata
      --user-agent string       Set the user-agent to a specified string (default \[dq]rclone/v1.63.1\[dq])
      --user-agent string       Set the user-agent to a specified string (default \[dq]rclone/v1.63.0\[dq])
  -v, --verbose count           Print lots more stuff (repeat for more)
\f[R]
.fi
@@ -38381,7 +38345,7 @@ Click \f[C]Submit\f[R]
.IP "7." 3
Switch to the \f[C]Settings\f[R] tab.
Fill \f[C]OAuth2 - Redirect URIs\f[R] as
\f[C]http://localhost:53682/\f[R] and click on \f[C]Add\f[R]
\f[C]http://localhost:53682/\f[R]
.IP "8." 3
Find the \f[C]App key\f[R] and \f[C]App secret\f[R] values on the
\f[C]Settings\f[R] tab.
@@ -42488,7 +42452,8 @@ Under \[dq]ENABLE APIS AND SERVICES\[dq] search for \[dq]Drive\[dq], and
enable the \[dq]Google Drive API\[dq].
.IP " 4." 4
Click \[dq]Credentials\[dq] in the left-side panel (not \[dq]Create
credentials\[dq], which opens the wizard).
credentials\[dq], which opens the wizard), then \[dq]Create
credentials\[dq]
.IP " 5." 4
If you already configured an \[dq]Oauth Consent Screen\[dq], then skip
to the next step; if not, click on \[dq]CONFIGURE CONSENT SCREEN\[dq]
@@ -46110,6 +46075,9 @@ Mail.ru Cloud (https://cloud.mail.ru/) is a cloud storage provided by a
Russian internet company Mail.Ru Group (https://mail.ru).
The official desktop client is Disk-O: (https://disk-o.cloud/en),
available on Windows and Mac OS.
.PP
Currently it is recommended to disable 2FA on Mail.ru accounts intended
for rclone until it gets eventually implemented.
.SS Features highlights
.IP \[bu] 2
Paths may be as deep as required, e.g.
@@ -59513,60 +59481,6 @@ Options:
.IP \[bu] 2
\[dq]error\[dq]: return an error based on option value
.SH Changelog
.SS v1.63.1 - 2023-07-17
.PP
See commits (https://github.com/rclone/rclone/compare/v1.63.0...v1.63.1)
.IP \[bu] 2
Bug Fixes
.RS 2
.IP \[bu] 2
build: Fix macos builds for versions < 12 (Anagh Kumar Baranwal)
.IP \[bu] 2
dirtree: Fix performance with large directories of directories and
\f[C]--fast-list\f[R] (Nick Craig-Wood)
.IP \[bu] 2
operations
.RS 2
.IP \[bu] 2
Fix deadlock when using \f[C]lsd\f[R]/\f[C]ls\f[R] with
\f[C]--progress\f[R] (Nick Craig-Wood)
.IP \[bu] 2
Fix \f[C].rclonelink\f[R] files not being converted back to symlinks
(Nick Craig-Wood)
.RE
.IP \[bu] 2
doc fixes (Dean Attali, Mahad, Nick Craig-Wood, Sawada Tsunayoshi,
Vladislav Vorobev)
.RE
.IP \[bu] 2
Local
.RS 2
.IP \[bu] 2
Fix partial directory read for corrupted filesystem (Nick Craig-Wood)
.RE
.IP \[bu] 2
Box
.RS 2
.IP \[bu] 2
Fix reconnect failing with HTTP 400 Bad Request (albertony)
.RE
.IP \[bu] 2
Smb
.RS 2
.IP \[bu] 2
Fix \[dq]Statfs failed: bucket or container name is needed\[dq] when
mounting (Nick Craig-Wood)
.RE
.IP \[bu] 2
WebDAV
.RS 2
.IP \[bu] 2
Nextcloud: fix must use /dav/files/USER endpoint not /webdav error
(Paul)
.IP \[bu] 2
Nextcloud chunking: add more guidance for the user to check the config
(darix)
.RE
.SS v1.63.0 - 2023-06-30
.PP
See commits (https://github.com/rclone/rclone/compare/v1.62.0...v1.63.0)
@@ -73503,13 +73417,6 @@ mac-15 <usman.ilamdin@phpstudios.com>
Forum for questions and general discussion:
.IP \[bu] 2
https://forum.rclone.org
.SS Business support
.PP
For business support or sponsorship enquiries please see:
.IP \[bu] 2
https://rclone.com/
.IP \[bu] 2
sponsorship\[at]rclone.com
.SS GitHub repository
.PP
The project\[aq]s repository is located at:
@@ -73519,18 +73426,15 @@ https://github.com/rclone/rclone
There you can file bug reports or contribute with pull requests.
.SS Twitter
.PP
You can also follow Nick on twitter for rclone announcements:
You can also follow me on twitter for rclone announcements:
.IP \[bu] 2
[\[at]njcw](https://twitter.com/njcw)
.SS Email
.PP
Or if all else fails or you want to ask something private or
confidential
.IP \[bu] 2
info\[at]rclone.com
.PP
Please don\[aq]t email requests for help to this address - those are
better directed to the forum unless you\[aq]d like to sign up for
business support.
confidential email Nick Craig-Wood (mailto:nick@craig-wood.com).
Please don\[aq]t email me requests for help - those are better directed
to the forum.
Thanks!
.SH AUTHORS
Nick Craig-Wood.

115 vfs/dir.go
@@ -22,9 +22,10 @@ import (

// Dir represents a directory entry
type Dir struct {
	vfs   *VFS   // read only
	inode uint64 // read only: inode number
	f     fs.Fs  // read only
	vfs          *VFS        // read only
	inode        uint64      // read only: inode number
	f            fs.Fs       // read only
	cleanupTimer *time.Timer // read only: timer to call cacheCleanup

	mu     sync.RWMutex // protects the following
	parent *Dir         // parent, nil for root
@@ -37,6 +38,8 @@ type Dir struct {

	modTimeMu sync.Mutex // protects the following
	modTime   time.Time

	_childVirtuals atomic.Int32 // non zero if any children have virtual directory entries
}

//go:generate stringer -type=vState
@@ -52,7 +55,7 @@ const (
)

func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
	return &Dir{
	d := &Dir{
		vfs:    vfs,
		f:      f,
		parent: parent,
@@ -62,6 +65,25 @@ func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
		inode: newInode(),
		items: make(map[string]Node),
	}
	d.cleanupTimer = time.AfterFunc(vfs.Opt.DirCacheTime*2, d.cacheCleanup)
	return d
}

func (d *Dir) cacheCleanup() {
	defer func() {
		// We should never panic here
		_ = recover()
	}()

	when := time.Now()

	d.mu.Lock()
	_, stale := d._age(when)
	d.mu.Unlock()

	if stale {
		d.ForgetAll()
	}
}

// String converts it to printable
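The cleanupTimer and cacheCleanup added above follow the usual time.AfterFunc pattern: arm a timer for twice the directory cache time when the Dir is created, let the callback drop cached entries once they have gone stale, and re-arm the timer whenever the directory is read again. The stand-alone sketch below illustrates that pattern with invented names; it is not the VFS code itself.

package example

import (
	"sync"
	"time"
)

// expiringCache is a toy stand-in for the VFS directory cache: it arms a
// timer for twice the cache lifetime and clears itself when the data is stale.
type expiringCache struct {
	mu      sync.Mutex
	ttl     time.Duration
	read    time.Time         // when the cache was last filled
	items   map[string]string // cached entries
	cleanup *time.Timer
}

func newExpiringCache(ttl time.Duration) *expiringCache {
	c := &expiringCache{
		ttl:   ttl,
		items: make(map[string]string),
	}
	// Like newDir above: arm the cleanup timer as soon as the cache exists.
	c.cleanup = time.AfterFunc(ttl*2, c.cacheCleanup)
	return c
}

func (c *expiringCache) cacheCleanup() {
	c.mu.Lock()
	if time.Since(c.read) > c.ttl {
		c.items = make(map[string]string) // stale: forget everything
	} else {
		c.cleanup.Reset(c.ttl * 2) // still fresh: check again later
	}
	c.mu.Unlock()
}

// refresh simulates a directory read: fill the cache and re-arm the timer,
// mirroring the cleanupTimer.Reset calls in _readDir and readDirTree below.
func (c *expiringCache) refresh(items map[string]string) {
	c.mu.Lock()
	c.items = items
	c.read = time.Now()
	c.cleanup.Reset(c.ttl * 2)
	c.mu.Unlock()
}
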
@@ -174,38 +196,81 @@ func (d *Dir) Node() Node {
	return d
}

// hasVirtuals returns whether the directory has virtual entries
func (d *Dir) hasVirtuals() bool {
	return d._childVirtuals.Load() != 0
}

// getVirtuals returns the number of virtual entries in this and children
func (d *Dir) getVirtuals() int32 {
	return d._childVirtuals.Load()
}

// addVirtuals increments or decrements the number of virtual
// directories by the amount given in this and all the parent
// directories.
func (d *Dir) addVirtuals(inc int32) {
	for {
		d._childVirtuals.Add(inc)
		d.mu.RLock()
		parent := d.parent
		d.mu.RUnlock()
		if parent == nil {
			break
		}
		d = parent
	}
}

// _addVirtuals increments or decrements the number of virtual
// directories by the amount given in this and all the parent
// directories.
//
// The dir lock must be held to call this
func (d *Dir) _addVirtuals(inc int32) {
	d._childVirtuals.Add(inc)
	if d.parent == nil {
		return
	}
	d.parent.addVirtuals(inc)
}

// ForgetAll forgets directory entries for this directory and any children.
//
// It does not invalidate or clear the cache of the parent directory.
//
// It returns true if the directory or any of its children had virtual entries
// so could not be forgotten. Children which didn't have virtual entries and
// children with virtual entries will be forgotten even if true is returned.
func (d *Dir) ForgetAll() (hasVirtual bool) {
	d.mu.Lock()
	defer d.mu.Unlock()
// Directories or parents of directories with virtual entries won't be
// forgotten.
func (d *Dir) ForgetAll() {
	d.mu.RLock()

	fs.Debugf(d.path, "forgetting directory cache")
	for _, node := range d.items {
		if dir, ok := node.(*Dir); ok {
			if dir.ForgetAll() {
				hasVirtual = true
			}
			dir.ForgetAll()
		}
	}

	d.mu.RUnlock()

	d.mu.Lock()
	defer d.mu.Unlock()

	// Purge any unnecessary virtual entries
	d._purgeVirtual()

	d.read = time.Time{}
	// Check if this dir has virtual entries
	if len(d.virtual) != 0 {
		hasVirtual = true
	}

	// Don't clear directory entries if there are virtual entries in this
	// directory or any children
	if !hasVirtual {
		d.items = make(map[string]Node)
	if d.hasVirtuals() {
		d.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
		return
	}
	return hasVirtual

	// Forget the items and stop the timer
	d.items = make(map[string]Node)
	d.cleanupTimer.Stop()
}

// forgetDirPath clears the cache for itself and all subdirectories if
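The hunk above replaces ForgetAll's hasVirtual return value with the _childVirtuals counter: whenever a virtual entry appears or is purged, the counter on every ancestor is adjusted, so a cache flush can ask hasVirtuals() with a single atomic load instead of threading a boolean back up through the recursion. A stand-alone sketch of that bookkeeping, using invented names rather than the VFS types, follows.

package example

import "sync/atomic"

// node is a toy tree node that tracks how many virtual entries live in
// itself and its descendants, like Dir._childVirtuals in the hunk above.
type node struct {
	parent   *node
	virtuals atomic.Int32
}

// addVirtuals adjusts the counter on this node and every ancestor,
// mirroring Dir.addVirtuals.
func (n *node) addVirtuals(inc int32) {
	for d := n; d != nil; d = d.parent {
		d.virtuals.Add(inc)
	}
}

// hasVirtuals reports whether this subtree still contains virtual
// entries, so a flush knows it must keep its entries map and re-arm
// its cleanup timer rather than forgetting everything.
func (n *node) hasVirtuals() bool {
	return n.virtuals.Load() != 0
}

The same counter is what makes the rename hunk below cheap: the subtree's total can be read once, subtracted from the old parent chain and added to the new one.
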
@@ -350,6 +415,9 @@ func (d *Dir) renameTree(dirPath string) {
// reading everything again
func (d *Dir) rename(newParent *Dir, fsDir fs.Directory) {
	d.ForgetAll()
	virtuals := d.getVirtuals()
	d.addVirtuals(-virtuals)
	newParent.addVirtuals(virtuals)

	d.modTimeMu.Lock()
	d.modTime = fsDir.ModTime(context.TODO())
@@ -386,6 +454,7 @@ func (d *Dir) addObject(node Node) {
	d.items[leaf] = node
	if d.virtual == nil {
		d.virtual = make(map[string]vState)
		d._addVirtuals(1)
	}
	vAdd := vAddFile
	if node.IsDir() {
@@ -434,6 +503,7 @@ func (d *Dir) delObject(leaf string) {
	delete(d.items, leaf)
	if d.virtual == nil {
		d.virtual = make(map[string]vState)
		d._addVirtuals(1)
	}
	d.virtual[leaf] = vDel
	fs.Debugf(d.path, "Added virtual directory entry %v: %q", vDel, leaf)
@@ -475,6 +545,8 @@ func (d *Dir) _readDir() error {
	}

	d.read = when
	d.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)

	return nil
}

@@ -493,6 +565,7 @@ func (d *Dir) _deleteVirtual(name string) {
	delete(d.virtual, name)
	if len(d.virtual) == 0 {
		d.virtual = nil
		d._addVirtuals(-1)
	}
	fs.Debugf(d.path, "Removed virtual directory entry %v: %q", virtualState, name)
}
@@ -654,6 +727,7 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree dirtree.DirTree
		dir.read = time.Time{}
	} else {
		dir.read = when
		dir.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
	}
	dir.mu.Unlock()
	if err != nil {
@@ -691,6 +765,7 @@ func (d *Dir) readDirTree() error {
	}
	fs.Debugf(d.path, "Reading directory tree done in %s", time.Since(when))
	d.read = when
	d.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
	return nil
}