Mirror of https://github.com/rclone/rclone.git (synced 2026-01-21 11:53:17 +00:00)

Compare commits: v1.63.0...protondrive (84 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 2ea8b3db4f | |
| | 91d095f468 | |
| | bff702a6f1 | |
| | a1d6bbd31f | |
| | fb6a9dfbf3 | |
| | 3f3c5f3ff4 | |
| | 89196cb353 | |
| | 9284506b86 | |
| | 88c72d1f4d | |
| | 5e3bf50b2e | |
| | 982f76b4df | |
| | 347812d1d3 | |
| | f4449440f8 | |
| | e66675d346 | |
| | 45228e2f18 | |
| | b866850fdd | |
| | 5b63b9534f | |
| | 10449c86a4 | |
| | 26a9a9fed2 | |
| | 602e42d334 | |
| | 4c5a21703e | |
| | f2ee949eff | |
| | 3ad255172c | |
| | 29b1751d0e | |
| | 363da9aa82 | |
| | 6c8148ef39 | |
| | 3ed4a2e963 | |
| | aaadb48d48 | |
| | 52e25c43b9 | |
| | 9a66563fc6 | |
| | 6ca670d66a | |
| | 809653055d | |
| | 61325ce507 | |
| | c3989d1906 | |
| | a79887171c | |
| | f29e284c90 | |
| | 9a66086fa0 | |
| | 1845c261c6 | |
| | 70cbcef624 | |
| | 9169b2b5ab | |
| | 0957c8fb74 | |
| | bb0cd76a5f | |
| | 08240c8cf5 | |
| | 014acc902d | |
| | 33fec9c835 | |
| | 3a5ffc7839 | |
| | 8a6bf35481 | |
| | f7d27f4bf2 | |
| | 378a2d21ee | |
| | 3404eb0444 | |
| | 13e5701f2a | |
| | 432d5d1e20 | |
| | cc05159518 | |
| | 119ccb2b95 | |
| | 0ef0e908ca | |
| | 0063d14dbb | |
| | 0d34efb10f | |
| | 415f4b2b93 | |
| | 07cf5f1d25 | |
| | 7d31956169 | |
| | 473d443874 | |
| | e294b76121 | |
| | 8f3c583870 | |
| | d0d41fe847 | |
| | 297f15a3e3 | |
| | d5f0affd4b | |
| | 0598aafbfd | |
| | 528e22f139 | |
| | f1a8420814 | |
| | e250f1afcd | |
| | ebf24c9872 | |
| | b4c7b240d8 | |
| | 22a14a8c98 | |
| | 07133b892d | |
| | a8ca18165e | |
| | 8c4e71fc84 | |
| | 351e2db2ef | |
| | 2234feb23d | |
| | fb5125ecee | |
| | e8cbc54a06 | |
| | 00512e1303 | |
| | fcfbd3153b | |
| | 9a8075b682 | |
| | 996037bee9 | |
.github/workflows/build.yml (vendored, 35 lines changed)
@@ -27,12 +27,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.21.0-rc.3'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +43,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.21.0-rc.3'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-11
-            go: '1.20'
+            go: '1.21.0-rc.3'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +59,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-11
-            go: '1.20'
+            go: '1.21.0-rc.3'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '1.20'
+            go: '1.21.0-rc.3'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,23 +76,23 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.21.0-rc.3'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.18
-            os: ubuntu-latest
-            go: '1.18'
-            quicktest: true
-            racequicktest: true

           - job_name: go1.19
             os: ubuntu-latest
             go: '1.19'
             quicktest: true
             racequicktest: true

+          - job_name: go1.20
+            os: ubuntu-latest
+            go: '1.20'
+            quicktest: true
+            racequicktest: true

     name: ${{ matrix.job_name }}

     runs-on: ${{ matrix.os }}
@@ -130,6 +130,11 @@ jobs:
       - name: Install Libraries on macOS
         shell: bash
         run: |
+          # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788
+          # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008
+          unset HOMEBREW_NO_INSTALL_FROM_API
+          brew untap --force homebrew/core
+          brew untap --force homebrew/cask
           brew update
           brew install --cask macfuse
         if: matrix.os == 'macos-11'
@@ -239,7 +244,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v4
         with:
-          go-version: '1.20'
+          go-version: '1.21.0-rc.3'
           check-latest: true

       - name: Install govulncheck
@@ -264,7 +269,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
-          go-version: '1.20'
+          go-version: '1.21.0-rc.3'

      - name: Go module cache
        uses: actions/cache@v3
CONTRIBUTING.md

@@ -419,7 +419,7 @@ remote or an fs.

Research

-* Look at the interfaces defined in `fs/fs.go`
+* Look at the interfaces defined in `fs/types.go`
* Study one or more of the existing remotes

Getting going
MAINTAINERS.md

@@ -18,6 +18,7 @@ Current active maintainers of rclone are:

| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
+| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |

**This is a work in progress Draft**
README.md

@@ -51,6 +51,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
+* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
@@ -84,6 +85,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
+* Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
backend/all/all.go

@@ -38,6 +38,7 @@ import (
    _ "github.com/rclone/rclone/backend/pcloud"
    _ "github.com/rclone/rclone/backend/pikpak"
    _ "github.com/rclone/rclone/backend/premiumizeme"
+   _ "github.com/rclone/rclone/backend/protondrive"
    _ "github.com/rclone/rclone/backend/putio"
    _ "github.com/rclone/rclone/backend/qingstor"
    _ "github.com/rclone/rclone/backend/s3"
backend/azureblob/azureblob.go

@@ -1,5 +1,5 @@
-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
@@ -95,6 +95,7 @@ Leave blank to use SAS URL or Emulator, otherwise it needs to be set.

If this is blank and if env_auth is set it will be read from the
environment variable `AZURE_STORAGE_ACCOUNT_NAME` if possible.
`,
+           Sensitive: true,
        }, {
            Name: "env_auth",
            Help: `Read credentials from runtime (environment variables, CLI or MSI).
@@ -106,11 +107,13 @@ See the [authentication docs](/azureblob#authentication) for full info.`,
            Help: `Storage Account Shared Key.

Leave blank to use SAS URL or Emulator.`,
+           Sensitive: true,
        }, {
            Name: "sas_url",
            Help: `SAS URL for container level access only.

Leave blank if using account/key or Emulator.`,
+           Sensitive: true,
        }, {
            Name: "tenant",
            Help: `ID of the service principal's tenant. Also called its directory ID.
@@ -120,6 +123,7 @@ Set this if using
- Service principal with certificate
- User with username and password
`,
+           Sensitive: true,
        }, {
            Name: "client_id",
            Help: `The ID of the client in use.
@@ -129,6 +133,7 @@ Set this if using
- Service principal with certificate
- User with username and password
`,
+           Sensitive: true,
        }, {
            Name: "client_secret",
            Help: `One of the service principal's client secrets
@@ -136,6 +141,7 @@ Set this if using
Set this if using
- Service principal with client secret
`,
+           Sensitive: true,
        }, {
            Name: "client_certificate_path",
            Help: `Path to a PEM or PKCS12 certificate file including the private key.
@@ -173,7 +179,8 @@ Optionally set this if using
Set this if using
- User with username and password
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "password",
            Help: `The user's password
@@ -216,17 +223,20 @@ msi_client_id, or msi_mi_res_id parameters.`,
            Default:  false,
            Advanced: true,
        }, {
-           Name:     "msi_object_id",
-           Help:     "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
-           Advanced: true,
+           Name:      "msi_object_id",
+           Help:      "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
+           Advanced:  true,
+           Sensitive: true,
        }, {
-           Name:     "msi_client_id",
-           Help:     "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
-           Advanced: true,
+           Name:      "msi_client_id",
+           Help:      "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
+           Advanced:  true,
+           Sensitive: true,
        }, {
-           Name:     "msi_mi_res_id",
-           Help:     "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
-           Advanced: true,
+           Name:      "msi_mi_res_id",
+           Help:      "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "use_emulator",
            Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
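The recurring change across these backend diffs is adding `Sensitive: true` to option definitions, which lets rclone treat the value as a secret when displaying configuration. A minimal sketch of the pattern, using a cut-down stand-in struct rather than rclone's real fs.Option (which has many more fields):

    package main

    import "fmt"

    // Option is a reduced stand-in for rclone's fs.Option, for illustration only.
    type Option struct {
        Name      string
        Help      string
        Required  bool
        Advanced  bool
        Sensitive bool // mark values that should be hidden when config is shown
    }

    func main() {
        opts := []Option{
            {Name: "account", Help: "Account ID or Application Key ID.", Required: true, Sensitive: true},
            {Name: "endpoint", Help: "Endpoint for the service."},
        }
        for _, o := range opts {
            fmt.Printf("%-8s sensitive=%v\n", o.Name, o.Sensitive)
        }
    }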
@@ -1,5 +1,5 @@
-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

package azureblob
backend/azureblob/azureblob_test.go

@@ -1,7 +1,7 @@
// Test AzureBlob filesystem interface

-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

package azureblob
backend/azureblob/azureblob_unsupported.go

@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

-//go:build plan9 || solaris || js || !go1.18
-// +build plan9 solaris js !go1.18
+//go:build plan9 || solaris || js
+// +build plan9 solaris js

package azureblob
backend/b2/b2.go

@@ -75,13 +75,15 @@ func init() {
        Description: "Backblaze B2",
        NewFs:       NewFs,
        Options: []fs.Option{{
-           Name:     "account",
-           Help:     "Account ID or Application Key ID.",
-           Required: true,
+           Name:      "account",
+           Help:      "Account ID or Application Key ID.",
+           Required:  true,
+           Sensitive: true,
        }, {
-           Name:     "key",
-           Help:     "Application Key.",
-           Required: true,
+           Name:      "key",
+           Help:      "Application Key.",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "endpoint",
            Help: "Endpoint for the service.\n\nLeave blank normally.",
backend/box/box.go

@@ -77,7 +77,7 @@ var (
)

type boxCustomClaims struct {
-   jwt.RegisteredClaims
+   jwt.StandardClaims
    BoxSubType string `json:"box_sub_type,omitempty"`
}
@@ -107,16 +107,18 @@ func init() {
            return nil, nil
        },
        Options: append(oauthutil.SharedOptions, []fs.Option{{
-           Name:     "root_folder_id",
-           Help:     "Fill in for rclone to use a non root folder as its starting point.",
-           Default:  "0",
-           Advanced: true,
+           Name:      "root_folder_id",
+           Help:      "Fill in for rclone to use a non root folder as its starting point.",
+           Default:   "0",
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "box_config_file",
            Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
        }, {
-           Name: "access_token",
-           Help: "Box App Primary Access Token\n\nLeave blank normally.",
+           Name:      "access_token",
+           Help:      "Box App Primary Access Token\n\nLeave blank normally.",
+           Sensitive: true,
        }, {
            Name:    "box_sub_type",
            Default: "user",
@@ -206,12 +208,14 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
    }

    claims = &boxCustomClaims{
-       RegisteredClaims: jwt.RegisteredClaims{
-           ID:        val,
+       //lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
+       //nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
+       StandardClaims: jwt.StandardClaims{
+           Id:        val,
            Issuer:    boxConfig.BoxAppSettings.ClientID,
            Subject:   boxConfig.EnterpriseID,
-           Audience:  jwt.ClaimStrings{tokenURL},
-           ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Second * 45)),
+           Audience:  tokenURL,
+           ExpiresAt: time.Now().Add(time.Second * 45).Unix(),
        },
        BoxSubType: boxSubType,
    }
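The hunk above moves from jwt-go's RegisteredClaims back to the v4-deprecated StandardClaims, which takes a plain-string Audience and a Unix-seconds ExpiresAt. A minimal sketch of constructing claims in that shape with github.com/golang-jwt/jwt/v4; buildClaims and the argument values are illustrative, not rclone's code:

    package main

    import (
        "fmt"
        "time"

        "github.com/golang-jwt/jwt/v4"
    )

    // buildClaims assembles short-lived claims in the same shape as the diff:
    // string Audience and Unix-seconds ExpiresAt.
    func buildClaims(jti, clientID, enterpriseID, tokenURL string) jwt.StandardClaims {
        //nolint:staticcheck // StandardClaims is deprecated in v4 but still available
        return jwt.StandardClaims{
            Id:        jti,
            Issuer:    clientID,
            Subject:   enterpriseID,
            Audience:  tokenURL,
            ExpiresAt: time.Now().Add(45 * time.Second).Unix(),
        }
    }

    func main() {
        c := buildClaims("some-jti", "client-id", "enterprise-id", "https://api.box.com/oauth2/token")
        fmt.Println(c.Audience, c.ExpiresAt)
    }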
backend/cache/cache.go (vendored, 14 lines changed)
@@ -76,17 +76,19 @@ func init() {
            Name: "plex_url",
            Help: "The URL of the Plex server.",
        }, {
-           Name: "plex_username",
-           Help: "The username of the Plex user.",
+           Name:      "plex_username",
+           Help:      "The username of the Plex user.",
+           Sensitive: true,
        }, {
            Name:       "plex_password",
            Help:       "The password of the Plex user.",
            IsPassword: true,
        }, {
-           Name:     "plex_token",
-           Help:     "The plex token for authentication - auto set normally.",
-           Hide:     fs.OptionHideBoth,
-           Advanced: true,
+           Name:      "plex_token",
+           Help:      "The plex token for authentication - auto set normally.",
+           Hide:      fs.OptionHideBoth,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "plex_insecure",
            Help: "Skip all certificate verification when connecting to the Plex server.",
backend/cache/cache_upload_test.go (vendored, 4 lines changed)
@@ -160,11 +160,11 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
    minSize := 5242880
    maxSize := 10485760
    totalFiles := 10
-   rand.Seed(time.Now().Unix())
+   randInstance := rand.New(rand.NewSource(time.Now().Unix()))

    lastFile := ""
    for i := 0; i < totalFiles; i++ {
-       size := int64(rand.Intn(maxSize-minSize) + minSize)
+       size := int64(randInstance.Intn(maxSize-minSize) + minSize)
        testReader := runInstance.randomReader(t, size)
        remote := "test/" + strconv.Itoa(i) + ".bin"
        runInstance.writeRemoteReader(t, rootFs, remote, testReader)
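Context for the change: Go 1.20 deprecated rand.Seed, so the test now seeds a private generator instead of the shared global one. A minimal standalone sketch of the same pattern:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    func main() {
        // Deprecated since Go 1.20: rand.Seed(time.Now().Unix())
        // Preferred: a private generator, as the test above now does.
        r := rand.New(rand.NewSource(time.Now().Unix()))

        minSize, maxSize := 5242880, 10485760
        size := int64(r.Intn(maxSize-minSize) + minSize) // uniform in [minSize, maxSize)
        fmt.Println(size)
    }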
backend/drive/drive.go

@@ -277,20 +277,23 @@ Leave blank normally.

Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "service_account_file",
            Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
        }, {
-           Name:     "service_account_credentials",
-           Help:     "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
-           Hide:     fs.OptionHideConfigurator,
-           Advanced: true,
+           Name:      "service_account_credentials",
+           Help:      "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
+           Hide:      fs.OptionHideConfigurator,
+           Advanced:  true,
+           Sensitive: true,
        }, {
-           Name:     "team_drive",
-           Help:     "ID of the Shared Drive (Team Drive).",
-           Hide:     fs.OptionHideConfigurator,
-           Advanced: true,
+           Name:      "team_drive",
+           Help:      "ID of the Shared Drive (Team Drive).",
+           Hide:      fs.OptionHideConfigurator,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name:    "auth_owner_only",
            Default: false,
@@ -416,10 +419,11 @@ date is used.`,
            Help:     "Size of listing chunk 100-1000, 0 to disable.",
            Advanced: true,
        }, {
-           Name:     "impersonate",
-           Default:  "",
-           Help:     `Impersonate this user when using a service account.`,
-           Advanced: true,
+           Name:      "impersonate",
+           Default:   "",
+           Help:      `Impersonate this user when using a service account.`,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name:    "alternate_export",
            Default: false,
@@ -592,7 +596,8 @@ Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is no needed.
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: config.ConfigEncoding,
            Help: config.ConfigEncodingHelp,
backend/dropbox/dropbox.go

@@ -182,8 +182,9 @@ client_secret) to use this option as currently rclone's default set of
permissions doesn't include "members.read". This can be added once
v1.55 or later is in use everywhere.
`,
-           Default:  "",
-           Advanced: true,
+           Default:   "",
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "shared_files",
            Help: `Instructs rclone to work on individual shared files.
backend/fichier/api.go

@@ -408,6 +408,32 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
    return response, nil
}

+func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
+   request := &MoveDirRequest{
+       FolderID:            folderID,
+       DestinationFolderID: destinationFolderID,
+       Rename:              newLeaf,
+       // DestinationUser: destinationUser,
+   }
+
+   opts := rest.Opts{
+       Method: "POST",
+       Path:   "/folder/mv.cgi",
+   }
+
+   response = &MoveDirResponse{}
+   err = f.pacer.Call(func() (bool, error) {
+       resp, err := f.rest.CallJSON(ctx, &opts, request, response)
+       return shouldRetry(ctx, resp, err)
+   })
+
+   if err != nil {
+       return nil, fmt.Errorf("couldn't move dir: %w", err)
+   }
+
+   return response, nil
+}
+
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
    request := &CopyFileRequest{
        URLs: []string{url},
backend/fichier/fichier.go

@@ -38,8 +38,9 @@ func init() {
        Description: "1Fichier",
        NewFs:       NewFs,
        Options: []fs.Option{{
-           Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
-           Name: "api_key",
+           Help:      "Your API Key, get it from https://1fichier.com/console/params.pl.",
+           Name:      "api_key",
+           Sensitive: true,
        }, {
            Help: "If you want to download a shared folder, add this parameter.",
            Name: "shared_folder",
@@ -487,6 +488,51 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    return dstObj, nil
}

+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server-side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove.
+//
+// If destination exists then return fs.ErrorDirExists.
+//
+// This is complicated by the fact that we can't use moveDir to move
+// to a different directory AND rename at the same time as it can
+// overwrite files in the source directory.
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+   srcFs, ok := src.(*Fs)
+   if !ok {
+       fs.Debugf(srcFs, "Can't move directory - not same remote type")
+       return fs.ErrorCantDirMove
+   }
+
+   srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+   if err != nil {
+       return err
+   }
+   srcIDnumeric, err := strconv.Atoi(srcID)
+   if err != nil {
+       return err
+   }
+   dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
+   if err != nil {
+       return err
+   }
+
+   var resp *MoveDirResponse
+   resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
+   if err != nil {
+       return fmt.Errorf("couldn't rename leaf: %w", err)
+   }
+   if resp.Status != "OK" {
+       return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
+   }
+
+   srcFs.dirCache.FlushDir(srcRemote)
+   return nil
+}
+
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
@@ -560,6 +606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
    _ fs.Fs             = (*Fs)(nil)
    _ fs.Mover          = (*Fs)(nil)
+   _ fs.DirMover       = (*Fs)(nil)
    _ fs.Copier         = (*Fs)(nil)
    _ fs.PublicLinker   = (*Fs)(nil)
    _ fs.PutUncheckeder = (*Fs)(nil)
backend/fichier/structs.go

@@ -70,6 +70,22 @@ type MoveFileResponse struct {
    URLs []string `json:"urls"`
}

+// MoveDirRequest is the request structure of the corresponding request
+type MoveDirRequest struct {
+   FolderID            int    `json:"folder_id"`
+   DestinationFolderID int    `json:"destination_folder_id,omitempty"`
+   DestinationUser     string `json:"destination_user"`
+   Rename              string `json:"rename,omitempty"`
+}
+
+// MoveDirResponse is the response structure of the corresponding request
+type MoveDirResponse struct {
+   Status  string `json:"status"`
+   Message string `json:"message"`
+   OldName string `json:"old_name"`
+   NewName string `json:"new_name"`
+}
+
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
    URLs []string `json:"urls"`
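For a sense of the wire format, marshalling a MoveDirRequest (struct copied from the hunk above; the values are made up) shows the JSON body that the /folder/mv.cgi endpoint would receive:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Local copy of the request struct from the diff, for illustration only.
    type MoveDirRequest struct {
        FolderID            int    `json:"folder_id"`
        DestinationFolderID int    `json:"destination_folder_id,omitempty"`
        DestinationUser     string `json:"destination_user"`
        Rename              string `json:"rename,omitempty"`
    }

    func main() {
        req := MoveDirRequest{FolderID: 123, DestinationFolderID: 456, Rename: "newname"}
        b, _ := json.Marshal(req)
        fmt.Println(string(b))
        // {"folder_id":123,"destination_folder_id":456,"destination_user":"","rename":"newname"}
    }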
backend/filefabric/filefabric.go

@@ -84,6 +84,7 @@ Leave blank normally.

Fill in to make rclone start with directory of a given ID.
`,
+           Sensitive: true,
        }, {
            Name: "permanent_token",
            Help: `Permanent Authentication Token.
@@ -97,6 +98,7 @@ These tokens are normally valid for several years.

For more info see: https://docs.storagemadeeasy.com/organisationcloud/api-tokens
`,
+           Sensitive: true,
        }, {
            Name: "token",
            Help: `Session Token.
@@ -106,7 +108,8 @@ usually valid for 1 hour.

Don't set this value - rclone will set it automatically.
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "token_expiry",
            Help: `Token expiry time.
backend/ftp/ftp.go

@@ -28,6 +28,7 @@ import (
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/env"
    "github.com/rclone/rclone/lib/pacer"
+   "github.com/rclone/rclone/lib/proxy"
    "github.com/rclone/rclone/lib/readers"
)

@@ -48,13 +49,15 @@ func init() {
        Description: "FTP",
        NewFs:       NewFs,
        Options: []fs.Option{{
-           Name:     "host",
-           Help:     "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
-           Required: true,
+           Name:      "host",
+           Help:      "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
+           Required:  true,
+           Sensitive: true,
        }, {
-           Name:    "user",
-           Help:    "FTP username.",
-           Default: currentUser,
+           Name:      "user",
+           Help:      "FTP username.",
+           Default:   currentUser,
+           Sensitive: true,
        }, {
            Name: "port",
            Help: "FTP port number.",
@@ -172,6 +175,18 @@ Enabled by default. Use 0 to disable.`,
If this is set and no password is supplied then rclone will ask for a password
`,
            Advanced: true,
+       }, {
+           Name:    "socks_proxy",
+           Default: "",
+           Help: `Socks 5 proxy host.
+
+Supports the format user:pass@host:port, user@host:port, host:port.
+
+Example:
+
+    myUser:myPass@localhost:9005
+`,
+           Advanced: true,
        }, {
            Name: config.ConfigEncoding,
            Help: config.ConfigEncodingHelp,
@@ -216,6 +231,7 @@ type Options struct {
    ShutTimeout fs.Duration          `config:"shut_timeout"`
    AskPassword bool                 `config:"ask_password"`
    Enc         encoder.MultiEncoder `config:"encoding"`
+   SocksProxy  string               `config:"socks_proxy"`
}

// Fs represents a remote FTP server
@@ -357,7 +373,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
    defer func() {
        fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
    }()
-   conn, err = fshttp.NewDialer(ctx).Dial(network, address)
+   baseDialer := fshttp.NewDialer(ctx)
+   if f.opt.SocksProxy != "" {
+       conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
+   } else {
+       conn, err = baseDialer.Dial(network, address)
+   }
    if err != nil {
        return nil, err
    }
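rclone's lib/proxy isn't shown in this diff, so as a hedge: golang.org/x/net/proxy provides equivalent SOCKS5 dialing, and a helper with the same shape as the SOCKS5Dial call above might look like the sketch below (parsing of the user:pass@host:port form into an auth struct is omitted):

    package main

    import (
        "fmt"
        "net"

        "golang.org/x/net/proxy"
    )

    // dialViaSocks5 routes a connection through a SOCKS5 proxy when one is
    // configured, falling back to the forward dialer otherwise.
    func dialViaSocks5(network, address, socksProxy string, forward proxy.Dialer) (net.Conn, error) {
        if socksProxy == "" {
            return forward.Dial(network, address)
        }
        d, err := proxy.SOCKS5("tcp", socksProxy, nil, forward)
        if err != nil {
            return nil, err
        }
        return d.Dial(network, address)
    }

    func main() {
        conn, err := dialViaSocks5("tcp", "ftp.example.com:21", "localhost:9005", proxy.Direct)
        if err != nil {
            fmt.Println("dial failed:", err)
            return
        }
        defer conn.Close()
        fmt.Println("connected via", conn.RemoteAddr())
    }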
backend/googlecloudstorage/googlecloudstorage.go

@@ -91,18 +91,21 @@ func init() {
            })
        },
        Options: append(oauthutil.SharedOptions, []fs.Option{{
-           Name: "project_number",
-           Help: "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
+           Name:      "project_number",
+           Help:      "Project number.\n\nOptional - needed only for list/create/delete buckets - see your developer console.",
+           Sensitive: true,
        }, {
-           Name: "user_project",
-           Help: "User project.\n\nOptional - needed only for requester pays.",
+           Name:      "user_project",
+           Help:      "User project.\n\nOptional - needed only for requester pays.",
+           Sensitive: true,
        }, {
            Name: "service_account_file",
            Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login." + env.ShellExpandHelp,
        }, {
-           Name: "service_account_credentials",
-           Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
-           Hide: fs.OptionHideBoth,
+           Name:      "service_account_credentials",
+           Help:      "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
+           Hide:      fs.OptionHideBoth,
+           Sensitive: true,
        }, {
            Name: "anonymous",
            Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
backend/hdfs/hdfs.go

@@ -19,9 +19,10 @@ func init() {
        Description: "Hadoop distributed file system",
        NewFs:       NewFs,
        Options: []fs.Option{{
-           Name:     "namenode",
-           Help:     "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
-           Required: true,
+           Name:      "namenode",
+           Help:      "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "username",
            Help: "Hadoop user name.",
@@ -29,6 +30,7 @@ func init() {
                Value: "root",
                Help:  "Connect to hdfs as root.",
            }},
+           Sensitive: true,
        }, {
            Name: "service_principal_name",
            Help: `Kerberos service principal name for the namenode.
@@ -36,7 +38,8 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "data_transfer_protection",
            Help: `Kerberos data transfer protection: authentication|integrity|privacy.
backend/internetarchive/internetarchive.go

@@ -133,11 +133,13 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
        },

        Options: []fs.Option{{
-           Name: "access_key_id",
-           Help: "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
+           Name:      "access_key_id",
+           Help:      "IAS3 Access Key.\n\nLeave blank for anonymous access.\nYou can find one here: https://archive.org/account/s3.php",
+           Sensitive: true,
        }, {
-           Name: "secret_access_key",
-           Help: "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
+           Name:      "secret_access_key",
+           Help:      "IAS3 Secret Key (password).\n\nLeave blank for anonymous access.",
+           Sensitive: true,
        }, {
            // their official client (https://github.com/jjjake/internetarchive) hardcodes following the two
            Name: "endpoint",
backend/jottacloud/jottacloud.go

@@ -74,6 +74,10 @@ const (
    tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
    tele2CloudAuthURL  = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
    tele2CloudClientID = "desktop"
+
+   onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
+   onlimeCloudAuthURL  = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
+   onlimeCloudClientID = "desktop"
)

// Register with Fs
@@ -84,7 +88,7 @@ func init() {
        Description: "Jottacloud",
        NewFs:       NewFs,
        Config:      Config,
-       Options: []fs.Option{{
+       Options: append(oauthutil.SharedOptions, []fs.Option{{
            Name:    "md5_memory_limit",
            Help:    "Files bigger than this will be cached on disk to calculate the MD5 if required.",
            Default: fs.SizeSuffix(10 * 1024 * 1024),
@@ -119,7 +123,7 @@ func init() {
            Default: (encoder.Display |
                encoder.EncodeWin | // :?"*<>|
                encoder.EncodeInvalidUtf8),
-       }},
+       }}...),
    })
}

@@ -139,6 +143,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
        }, {
            Value: "tele2",
            Help:  "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
+       }, {
+           Value: "onlime",
+           Help:  "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
        }})
    case "auth_type_done":
        // Jump to next state according to config chosen
@@ -261,6 +268,21 @@ machines.`)
                RedirectURL: oauthutil.RedirectLocalhostURL,
            },
        })
+   case "onlime": // onlime cloud config
+       m.Set("configVersion", fmt.Sprint(configVersion))
+       m.Set(configClientID, onlimeCloudClientID)
+       m.Set(configTokenURL, onlimeCloudTokenURL)
+       return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
+           OAuth2Config: &oauth2.Config{
+               Endpoint: oauth2.Endpoint{
+                   AuthURL:  onlimeCloudAuthURL,
+                   TokenURL: onlimeCloudTokenURL,
+               },
+               ClientID:    onlimeCloudClientID,
+               Scopes:      []string{"openid", "jotta-default", "offline_access"},
+               RedirectURL: oauthutil.RedirectLocalhostURL,
+           },
+       })
    case "choose_device":
        return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
Choosing no, the default, will let you access the storage used for the archive
backend/koofr/koofr.go

@@ -61,9 +61,10 @@ func init() {
            Default:  true,
            Advanced: true,
        }, {
-           Name:     "user",
-           Help:     "Your user name.",
-           Required: true,
+           Name:      "user",
+           Help:      "Your user name.",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "password",
            Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
backend/local/local.go

@@ -516,7 +516,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
                    continue
                }
            }
-           err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
+           fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
            fs.Errorf(dir, "%v", fierr)
            _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
            continue
backend/local/local_internal_test.go

@@ -19,6 +19,7 @@ import (
    "github.com/rclone/rclone/fs/filter"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/object"
+   "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/lib/file"
    "github.com/rclone/rclone/lib/readers"
@@ -514,3 +515,43 @@ func TestFilterSymlinkCopyLinks(t *testing.T) {
func TestFilterSymlinkLinks(t *testing.T) {
    testFilterSymlink(t, false)
}
+
+func TestCopySymlink(t *testing.T) {
+   ctx := context.Background()
+   r := fstest.NewRun(t)
+   defer r.Finalise()
+   when := time.Now()
+   f := r.Flocal.(*Fs)
+
+   // Create a file and a symlink to it
+   r.WriteFile("src/file.txt", "hello world", when)
+   require.NoError(t, os.Symlink("file.txt", filepath.Join(r.LocalName, "src", "link.txt")))
+   defer func() {
+       // Reset -L/-l mode
+       f.opt.FollowSymlinks = false
+       f.opt.TranslateSymlinks = false
+       f.lstat = os.Lstat
+   }()
+
+   // Set fs into "-l/--links" mode
+   f.opt.FollowSymlinks = false
+   f.opt.TranslateSymlinks = true
+   f.lstat = os.Lstat
+
+   // Create dst
+   require.NoError(t, f.Mkdir(ctx, "dst"))
+
+   // Do copy from src into dst
+   src, err := f.NewObject(ctx, "src/link.txt.rclonelink")
+   require.NoError(t, err)
+   require.NotNil(t, src)
+   dst, err := operations.Copy(ctx, f, nil, "dst/link.txt.rclonelink", src)
+   require.NoError(t, err)
+   require.NotNil(t, dst)
+
+   // Test that we made a symlink and it has the right contents
+   dstPath := filepath.Join(r.LocalName, "dst", "link.txt")
+   linkContents, err := os.Readlink(dstPath)
+   require.NoError(t, err)
+   assert.Equal(t, "file.txt", linkContents)
+}
backend/mailru/mailru.go

@@ -85,10 +85,11 @@ func init() {
        Name:        "mailru",
        Description: "Mail.ru Cloud",
        NewFs:       NewFs,
-       Options: []fs.Option{{
-           Name:     "user",
-           Help:     "User name (usually email).",
-           Required: true,
+       Options: append(oauthutil.SharedOptions, []fs.Option{{
+           Name:      "user",
+           Help:      "User name (usually email).",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "pass",
            Help: `Password.
@@ -213,7 +214,7 @@ Supported quirks: atomicmkdir binlist unknowndirs`,
                encoder.EncodeWin | // :?"*<>|
                encoder.EncodeBackSlash |
                encoder.EncodeInvalidUtf8),
-       }},
+       }}...),
    })
}
backend/mega/mega.go

@@ -58,9 +58,10 @@ func init() {
        Description: "Mega",
        NewFs:       NewFs,
        Options: []fs.Option{{
-           Name:     "user",
-           Help:     "User name.",
-           Required: true,
+           Name:      "user",
+           Help:      "User name.",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "pass",
            Help: "Password.",
backend/netstorage/netstorage.go

@@ -65,11 +65,13 @@ HTTP is provided primarily for debugging purposes.`,
            Help: `Domain+path of NetStorage host to connect to.

Format should be ` + "`<domain>/<internal folders>`",
-           Required: true,
+           Required:  true,
+           Sensitive: true,
        }, {
-           Name:     "account",
-           Help:     "Set the NetStorage account name",
-           Required: true,
+           Name:      "account",
+           Help:      "Set the NetStorage account name",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "secret",
            Help: `Set the NetStorage account secret/G2O key for authentication.
backend/onedrive/onedrive.go

@@ -131,10 +131,11 @@ Note that the chunks will be buffered into memory.`,
            Default:  defaultChunkSize,
            Advanced: true,
        }, {
-           Name:     "drive_id",
-           Help:     "The ID of the drive to use.",
-           Default:  "",
-           Advanced: true,
+           Name:      "drive_id",
+           Help:      "The ID of the drive to use.",
+           Default:   "",
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "drive_type",
            Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
@@ -148,7 +149,8 @@ This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "access_scopes",
            Help: `Set scopes to be requested by rclone.
@@ -260,7 +262,8 @@ this flag there.

At the time of writing this only works with OneDrive personal paid accounts.
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name:    "hash_type",
            Default: "auto",
backend/opendrive/opendrive.go

@@ -42,9 +42,10 @@ func init() {
        Description: "OpenDrive",
        NewFs:       NewFs,
        Options: []fs.Option{{
-           Name:     "username",
-           Help:     "Username.",
-           Required: true,
+           Name:      "username",
+           Help:      "Username.",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "password",
            Help: "Password.",
backend/oracleobjectstorage/options.go

@@ -92,14 +92,16 @@ func newOptions() []fs.Option {
            Help: noAuthHelpText,
        }},
    }, {
-       Name:     "namespace",
-       Help:     "Object storage namespace",
-       Required: true,
+       Name:      "namespace",
+       Help:      "Object storage namespace",
+       Required:  true,
+       Sensitive: true,
    }, {
-       Name:     "compartment",
-       Help:     "Object storage compartment OCID",
-       Provider: "!no_auth",
-       Required: true,
+       Name:      "compartment",
+       Help:      "Object storage compartment OCID",
+       Provider:  "!no_auth",
+       Required:  true,
+       Sensitive: true,
    }, {
        Name: "region",
        Help: "Object storage Region",
backend/pcloud/pcloud.go

@@ -110,10 +110,11 @@ func init() {
                encoder.EncodeBackSlash |
                encoder.EncodeInvalidUtf8),
        }, {
-           Name:     "root_folder_id",
-           Help:     "Fill in for rclone to use a non root folder as its starting point.",
-           Default:  "d0",
-           Advanced: true,
+           Name:      "root_folder_id",
+           Help:      "Fill in for rclone to use a non root folder as its starting point.",
+           Default:   "d0",
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "hostname",
            Help: `Hostname to connect to.
@@ -138,7 +139,8 @@ with rclone authorize.
This is only required when you want to use the cleanup command. Due to a bug
in the pcloud API the required API does not support OAuth authentication so
we have to rely on user password authentication for it.`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name: "password",
            Help: "Your pcloud password.",
backend/pikpak/pikpak.go

@@ -158,9 +158,10 @@ func init() {
            return nil, fmt.Errorf("unknown state %q", config.State)
        },
        Options: append(pikpakOAuthOptions(), []fs.Option{{
-           Name:     "user",
-           Help:     "Pikpak username.",
-           Required: true,
+           Name:      "user",
+           Help:      "Pikpak username.",
+           Required:  true,
+           Sensitive: true,
        }, {
            Name: "pass",
            Help: "Pikpak password.",
@@ -173,7 +174,8 @@ Leave blank normally.

Fill in for rclone to use a non root folder as its starting point.
`,
-           Advanced: true,
+           Advanced:  true,
+           Sensitive: true,
        }, {
            Name:    "use_trash",
            Default: true,
backend/premiumizeme/premiumizeme.go

@@ -82,14 +82,15 @@ func init() {
                OAuth2Config: oauthConfig,
            })
        },
-       Options: []fs.Option{{
+       Options: append(oauthutil.SharedOptions, []fs.Option{{
            Name: "api_key",
            Help: `API Key.

This is not normally used - use oauth instead.
`,
-           Hide:    fs.OptionHideBoth,
-           Default: "",
+           Hide:      fs.OptionHideBoth,
+           Default:   "",
+           Sensitive: true,
        }, {
            Name: config.ConfigEncoding,
            Help: config.ConfigEncodingHelp,
@@ -99,7 +100,7 @@ This is not normally used - use oauth instead.
                encoder.EncodeBackSlash |
                encoder.EncodeDoubleQuote |
                encoder.EncodeInvalidUtf8),
-       }},
+       }}...),
    })
}
backend/protondrive/protondrive.go (new file, 1078 lines; diff suppressed because it is too large)
backend/protondrive/protondrive_test.go (new file, 16 lines)

@@ -0,0 +1,16 @@
+package protondrive_test
+
+import (
+   "testing"
+
+   "github.com/rclone/rclone/backend/protondrive"
+   "github.com/rclone/rclone/fstest/fstests"
+)
+
+// TestIntegration runs integration tests against the remote
+func TestIntegration(t *testing.T) {
+   fstests.Run(t, &fstests.Opt{
+       RemoteName: "TestProtonDrive:",
+       NilObject:  (*protondrive.Object)(nil),
+   })
+}
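rclone's integration-test harness conventionally picks up the remote named by the -remote flag, so assuming a TestProtonDrive: remote is configured, an invocation like the following should exercise the new backend:

    cd backend/protondrive && go test -v -remote TestProtonDrive: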
backend/putio/putio.go

@@ -67,7 +67,7 @@ func init() {
                NoOffline: true,
            })
        },
-       Options: []fs.Option{{
+       Options: append(oauthutil.SharedOptions, []fs.Option{{
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
@@ -77,7 +77,7 @@ func init() {
            Default: (encoder.Display |
                encoder.EncodeBackSlash |
                encoder.EncodeInvalidUtf8),
-       }},
+       }}...),
    })
}
backend/qingstor/qingstor.go

@@ -49,11 +49,13 @@ func init() {
            Help: "Get QingStor credentials from the environment (env vars or IAM).",
        }},
    }, {
-       Name: "access_key_id",
-       Help: "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
+       Name:      "access_key_id",
+       Help:      "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
+       Sensitive: true,
    }, {
-       Name: "secret_access_key",
-       Help: "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
+       Name:      "secret_access_key",
+       Help:      "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
+       Sensitive: true,
    }, {
        Name: "endpoint",
        Help: "Enter an endpoint URL to connection QingStor API.\n\nLeave blank will use the default value \"https://qingstor.com:443\".",
backend/s3/s3.go (100 lines changed)
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, China Mobile, Cloudflare, GCS, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Leviia, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
@@ -127,6 +127,9 @@ func init() {
|
||||
}, {
|
||||
Value: "LyveCloud",
|
||||
Help: "Seagate Lyve Cloud",
|
||||
}, {
|
||||
Value: "Leviia",
|
||||
Help: "Leviia Object Storage",
|
||||
}, {
|
||||
Value: "Liara",
|
||||
Help: "Liara Object Storage",
|
||||
@@ -154,6 +157,9 @@ func init() {
|
||||
}, {
|
||||
Value: "Storj",
|
||||
Help: "Storj (S3 Compatible Gateway)",
|
||||
}, {
|
||||
Value: "Synology",
|
||||
Help: "Synology C2 Object Storage",
|
||||
}, {
|
||||
Value: "TencentCOS",
|
||||
Help: "Tencent Cloud Object Storage (COS)",
|
||||
@@ -179,11 +185,13 @@ func init() {
|
||||
Help: "Get AWS credentials from the environment (env vars or IAM).",
|
||||
}},
|
||||
}, {
|
||||
Name: "access_key_id",
|
||||
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Name: "access_key_id",
|
||||
Help: "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "secret_access_key",
|
||||
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Name: "secret_access_key",
|
||||
Help: "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
|
||||
Sensitive: true,
|
||||
}, {
|
||||
// References:
|
||||
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
|
||||
@@ -463,10 +471,30 @@ func init() {
|
||||
Value: "sa-east-1",
|
||||
Help: "South America (São Paulo)",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your data stored.\n",
|
||||
Provider: "Synology",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "eu-001",
|
||||
Help: "Europe Region 1",
|
||||
}, {
|
||||
Value: "eu-002",
|
||||
Help: "Europe Region 2",
|
||||
}, {
|
||||
Value: "us-001",
|
||||
Help: "US Region 1",
|
||||
}, {
|
||||
Value: "us-002",
|
||||
Help: "US Region 2",
|
||||
}, {
|
||||
Value: "tw-001",
|
||||
Help: "Asia (Taiwan)",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -815,6 +843,15 @@ func init() {
|
||||
Value: "s3.sa-east-1.petabox.io",
|
||||
Help: "South America (São Paulo)",
|
||||
}},
|
||||
}, {
|
||||
// Leviia endpoints: https://www.leviia.com/object-storage/
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Leviia Object Storage API.",
|
||||
Provider: "Leviia",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3.leviia.com",
|
||||
Help: "The default endpoint\nLeviia",
|
||||
}},
|
||||
}, {
|
||||
// Liara endpoints: https://liara.ir/landing/object-storage
|
||||
Name: "endpoint",
|
||||
@@ -1000,6 +1037,26 @@ func init() {
|
||||
Value: "gateway.storjshare.io",
|
||||
Help: "Global Hosted Gateway",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Synology C2 Object Storage API.",
|
||||
Provider: "Synology",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "eu-001.s3.synologyc2.net",
|
||||
Help: "EU Endpoint 1",
|
||||
}, {
|
||||
Value: "eu-002.s3.synologyc2.net",
|
||||
Help: "EU Endpoint 2",
|
||||
}, {
|
||||
Value: "us-001.s3.synologyc2.net",
|
||||
Help: "US Endpoint 1",
|
||||
}, {
|
||||
Value: "us-002.s3.synologyc2.net",
|
||||
Help: "US Endpoint 2",
|
||||
}, {
|
||||
Value: "tw-001.s3.synologyc2.net",
|
||||
Help: "TW Endpoint 1",
|
||||
}},
|
||||
}, {
|
||||
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
|
||||
Name: "endpoint",
|
||||
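With the Synology provider, region, and endpoints wired in above, a remote could be created non-interactively. The remote name and credentials below are placeholders, and the key=value form assumes a reasonably recent rclone config create:

    rclone config create c2 s3 provider=Synology region=eu-001 \
        endpoint=eu-001.s3.synologyc2.net \
        access_key_id=EXAMPLEKEY secret_access_key=EXAMPLESECRET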
@@ -1156,7 +1213,7 @@ func init() {
    }, {
        Name: "endpoint",
        Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-       Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,RackCorp,Qiniu,Petabox",
+       Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
        Examples: []fs.OptionExample{{
            Value: "objects-us-east-1.dream.io",
            Help:  "Dream Objects endpoint",
@@ -1644,7 +1701,7 @@ func init() {
    }, {
        Name: "location_constraint",
        Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
-       Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
+       Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
    }, {
        Name: "acl",
        Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1659,7 +1716,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
-       Provider: "!Storj,Cloudflare",
+       Provider: "!Storj,Synology,Cloudflare",
        Examples: []fs.OptionExample{{
            Value: "default",
            Help:  "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -1775,6 +1832,7 @@ header is added and the default (private) will be used.
            Value: "arn:aws:kms:us-east-1:*",
            Help:  "arn:aws:kms:*",
        }},
+       Sensitive: true,
    }, {
        Name: "sse_customer_key",
        Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.
@@ -1786,6 +1844,7 @@ Alternatively you can provide --sse-customer-key-base64.`,
            Value: "",
            Help:  "None",
        }},
+       Sensitive: true,
    }, {
        Name: "sse_customer_key_base64",
        Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.
@@ -1797,6 +1856,7 @@ Alternatively you can provide --sse-customer-key.`,
            Value: "",
            Help:  "None",
        }},
+       Sensitive: true,
    }, {
        Name: "sse_customer_key_md5",
        Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
@@ -1809,6 +1869,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
            Value: "",
            Help:  "None",
        }},
+       Sensitive: true,
    }, {
        Name: "storage_class",
        Help: "The storage class to use when storing new objects in S3.",
@@ -2050,9 +2111,10 @@ If empty it will default to the environment variable "AWS_PROFILE" or
`,
        Advanced: true,
    }, {
-       Name:     "session_token",
-       Help:     "An AWS session token.",
-       Advanced: true,
+       Name:      "session_token",
+       Help:      "An AWS session token.",
+       Advanced:  true,
+       Sensitive: true,
    }, {
        Name: "upload_concurrency",
        Help: `Concurrency for multipart uploads.
@@ -2969,11 +3031,15 @@ func setQuirks(opt *Options) {
        if opt.ChunkSize < 64*fs.Mebi {
            opt.ChunkSize = 64 * fs.Mebi
        }
+   case "Synology":
+       useMultipartEtag = false
    case "TencentCOS":
        listObjectsV2 = false    // untested
        useMultipartEtag = false // untested
    case "Wasabi":
        // No quirks
+   case "Leviia":
+       // No quirks
    case "Qiniu":
        useMultipartEtag = false
        urlEncodeListings = false
@@ -4355,17 +4421,17 @@ to normal storage.

Usage Examples:

-    rclone backend restore s3:bucket/path/to/object [-o priority=PRIORITY] [-o lifetime=DAYS]
-    rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
-    rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]
+    rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
+    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
+    rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS

This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags

-    rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard
+    rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1

All the objects shown will be marked for restore, then

-    rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
+    rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1

It returns a list of status dictionaries with Remote and Status
keys. The Status will be OK if it was successful or an error message
backend/seafile/seafile.go

@@ -67,15 +67,18 @@ func init() {
            Value: "https://cloud.seafile.com/",
            Help:  "Connect to cloud.seafile.com.",
        }},
+       Sensitive: true,
    }, {
-       Name:     configUser,
-       Help:     "User name (usually email address).",
-       Required: true,
+       Name:      configUser,
+       Help:      "User name (usually email address).",
+       Required:  true,
+       Sensitive: true,
    }, {
        // Password is not required, it will be left blank for 2FA
        Name:       configPassword,
        Help:       "Password.",
        IsPassword: true,
+       Sensitive:  true,
    }, {
        Name: config2FA,
        Help: "Two-factor authentication ('true' if the account has 2FA enabled).",
@@ -87,6 +90,7 @@ func init() {
        Name:       configLibraryKey,
        Help:       "Library password (for encrypted libraries only).\n\nLeave blank if you pass it through the command line.",
        IsPassword: true,
+       Sensitive:  true,
    }, {
        Name: configCreateLibrary,
        Help: "Should rclone create a library if it doesn't exist.",
@@ -94,9 +98,10 @@ func init() {
        Default: false,
    }, {
        // Keep the authentication token after entering the 2FA code
-       Name: configAuthToken,
-       Help: "Authentication token.",
-       Hide: fs.OptionHideBoth,
+       Name:      configAuthToken,
+       Help:      "Authentication token.",
+       Hide:      fs.OptionHideBoth,
+       Sensitive: true,
    }, {
        Name: config.ConfigEncoding,
        Help: config.ConfigEncodingHelp,
@@ -27,7 +27,6 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/env"
	"github.com/rclone/rclone/lib/pacer"
@@ -59,13 +58,15 @@ func init() {
		Description: "SSH/SFTP",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "host",
			Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
			Required: true,
			Name: "host",
			Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
			Required: true,
			Sensitive: true,
		}, {
			Name: "user",
			Help: "SSH username.",
			Default: currentUser,
			Name: "user",
			Help: "SSH username.",
			Default: currentUser,
			Sensitive: true,
		}, {
			Name: "port",
			Help: "SSH port number.",
@@ -75,8 +76,9 @@ func init() {
			Help: "SSH password, leave blank to use ssh-agent.",
			IsPassword: true,
		}, {
			Name: "key_pem",
			Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
			Name: "key_pem",
			Help: "Raw PEM-encoded private key.\n\nIf specified, will override key_file parameter.",
			Sensitive: true,
		}, {
			Name: "key_file",
			Help: "Path to PEM-encoded private key file.\n\nLeave blank or set key-use-agent to use ssh-agent." + env.ShellExpandHelp,
@@ -87,6 +89,7 @@ func init() {
Only PEM encrypted key files (old OpenSSH format) are supported. Encrypted keys
in the new OpenSSH format can't be used.`,
			IsPassword: true,
			Sensitive: true,
		}, {
			Name: "pubkey_file",
			Help: `Optional path to public key file.
@@ -165,7 +168,19 @@ E.g. if shared folders can be found in directories representing volumes:

E.g. if home directory can be found in a shared folder called "home":

    rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
    rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory

To specify only the path to the SFTP remote's root, and allow rclone to add any relative subpaths automatically (including unwrapping/decrypting remotes as necessary), add the '@' character to the beginning of the path.

E.g. the first example above could be rewritten as:

    rclone sync /home/local/directory remote:/directory --sftp-path-override @/volume2

Note that when using this method with Synology "home" folders, the full "/homes/USER" path should be specified instead of "/home".

E.g. the second example above should be rewritten as:

    rclone sync /home/local/directory remote:/homes/USER/directory --sftp-path-override @/volume1`,
			Advanced: true,
		}, {
			Name: "set_modtime",
@@ -384,6 +399,47 @@ Example:

    ssh-ed25519 ssh-rsa ssh-dss
`,
			Advanced: true,
		}, {
			Name:    "ssh",
			Default: fs.SpaceSepList{},
			Help: `Path and arguments to external ssh binary.

Normally rclone will use its internal ssh library to connect to the
SFTP server. However it does not implement all possible ssh options so
it may be desirable to use an external ssh binary.

Rclone ignores all the internal config if you use this option and
expects you to configure the ssh binary with the user/host/port and
any other options you need.

**Important** The ssh command must log in without asking for a
password so needs to be configured with keys or certificates.

Rclone will run the command supplied either with the additional
arguments "-s sftp" to access the SFTP subsystem or with commands such
as "md5sum /path/to/file" appended to read checksums.

Any arguments with spaces in should be surrounded by "double quotes".

An example setting might be:

    ssh -o ServerAliveInterval=20 user@example.com

Note that when using an external ssh binary rclone makes a new ssh
connection for every hash it calculates.
`,
		}, {
			Name:    "socks_proxy",
			Default: "",
			Help: `Socks 5 proxy host.

Supports the format user:pass@host:port, user@host:port, host:port.

Example:

    myUser:myPass@localhost:9005
`,
			Advanced: true,
		}},
	}
	fs.Register(fsi)
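The new ssh option is declared as fs.SpaceSepList, so the configured value is split on spaces with "double quoted" arguments kept intact. A minimal sketch of consuming such a value, assuming fs.SpaceSepList's flag-style Set method (the command string is hypothetical):

    package sftp

    import (
        "errors"

        "github.com/rclone/rclone/fs"
    )

    // splitSSHCommand parses a configured ssh command into the binary
    // and its arguments, mirroring how the option value is used below.
    func splitSSHCommand(value string) (bin string, args []string, err error) {
        var cmd fs.SpaceSepList
        if err := cmd.Set(value); err != nil {
            return "", nil, err
        }
        if len(cmd) == 0 {
            return "", nil, errors.New("empty ssh command")
        }
        return cmd[0], cmd[1:], nil
    }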
@@ -423,6 +479,8 @@ type Options struct {
	KeyExchange       fs.SpaceSepList `config:"key_exchange"`
	MACs              fs.SpaceSepList `config:"macs"`
	HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
	SSH               fs.SpaceSepList `config:"ssh"`
	SocksProxy        string          `config:"socks_proxy"`
}

// Fs stores the interface to the remote SFTP files
@@ -459,41 +517,16 @@ type Object struct {
	sha1sum *string // Cached SHA1 checksum
}

// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
	dialer := fshttp.NewDialer(ctx)
	conn, err := dialer.Dial(network, addr)
	if err != nil {
		return nil, err
	}
	c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
	if err != nil {
		return nil, err
	}
	fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
	return ssh.NewClient(c, chans, reqs), nil
}

// conn encapsulates an ssh client and corresponding sftp client
type conn struct {
	sshClient *ssh.Client
	sshClient sshClient
	sftpClient *sftp.Client
	err        chan error
}

// Wait for connection to close
func (c *conn) wait() {
	c.err <- c.sshClient.Conn.Wait()
}

// Send a keepalive over the ssh connection
func (c *conn) sendKeepAlive() {
	_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
	if err != nil {
		fs.Debugf(nil, "Failed to send keep alive: %v", err)
	}
	c.err <- c.sshClient.Wait()
}

// Send keepalives every interval over the ssh connection until done is closed
@@ -505,7 +538,7 @@ func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
	for {
		select {
		case <-t.C:
			c.sendKeepAlive()
			c.sshClient.SendKeepAlive()
		case <-done:
			return
		}
@@ -557,7 +590,11 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
	c = &conn{
		err: make(chan error, 1),
	}
	c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
	if len(f.opt.SSH) == 0 {
		c.sshClient, err = f.newSSHClientInternal(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
	} else {
		c.sshClient, err = f.newSSHClientExternal()
	}
	if err != nil {
		return nil, fmt.Errorf("couldn't connect SSH: %w", err)
	}
@@ -571,7 +608,7 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
}

// Set any environment variables on the ssh.Session
func (f *Fs) setEnv(s *ssh.Session) error {
func (f *Fs) setEnv(s sshSession) error {
	for _, env := range f.opt.SetEnv {
		equal := strings.IndexRune(env, '=')
		if equal < 0 {
@@ -588,8 +625,8 @@ func (f *Fs) setEnv(s *ssh.Session) error {

// Creates a new SFTP client on conn, using the specified subsystem
// or sftp server, and zero or more option functions
func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
	s, err := conn.NewSession()
func (f *Fs) newSftpClient(client sshClient, opts ...sftp.ClientOption) (*sftp.Client, error) {
	s, err := client.NewSession()
	if err != nil {
		return nil, err
	}
@@ -662,6 +699,9 @@ func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
// Getwd request
func (f *Fs) putSftpConnection(pc **conn, err error) {
	c := *pc
	if !c.sshClient.CanReuse() {
		return
	}
	*pc = nil
	if err != nil {
		// work out if this is an expected error
@@ -740,6 +780,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if err != nil {
		return nil, err
	}
	if len(opt.SSH) != 0 && (opt.User != "" || opt.Host != "" || opt.Port != "") {
		fs.Logf(name, "--sftp-ssh is in use - ignoring user/host/port from config - set in the parameters to --sftp-ssh (remove them from the config to silence this warning)")
	}

	if opt.User == "" {
		opt.User = currentUser
	}
@@ -1012,8 +1056,8 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
		fs.Debugf(f, "Failed to get shell session for shell type detection command: %v", err)
	} else {
		var stdout, stderr bytes.Buffer
		session.Stdout = &stdout
		session.Stderr = &stderr
		session.SetStdout(&stdout)
		session.SetStderr(&stderr)
		shellCmd := "echo ${ShellId}%ComSpec%"
		fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
		err = session.Run(shellCmd)
@@ -1423,8 +1467,8 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
	}()

	var stdout, stderr bytes.Buffer
	session.Stdout = &stdout
	session.Stderr = &stderr
	session.SetStdout(&stdout)
	session.SetStderr(&stderr)

	fs.Debugf(f, "Running remote command: %s", cmd)
	err = session.Run(cmd)
@@ -1731,6 +1775,9 @@ func (f *Fs) remotePath(remote string) string {
func (f *Fs) remoteShellPath(remote string) string {
	if f.opt.PathOverride != "" {
		shellPath := path.Join(f.opt.PathOverride, remote)
		if f.opt.PathOverride[0] == '@' {
			shellPath = path.Join(strings.TrimPrefix(f.opt.PathOverride, "@"), f.absRoot, remote)
		}
		fs.Debugf(f, "Shell path redirected to %q with option path_override", shellPath)
		return shellPath
	}
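A worked illustration of the new '@' handling above, using hypothetical values for path_override, absRoot and the remote path (only the stdlib behaviour is asserted here):

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    func main() {
        override := "@/volume1" // hypothetical path_override value
        absRoot := "/homes/USER"
        remote := "docs/file.txt"
        // With the '@' prefix the remote's root and subpath are appended:
        shellPath := path.Join(strings.TrimPrefix(override, "@"), absRoot, remote)
        fmt.Println(shellPath) // /volume1/homes/USER/docs/file.txt
    }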
@@ -1988,9 +2035,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	if err != nil {
		return fmt.Errorf("Update: %w", err)
	}
	// Hang on to the connection for the whole upload so it doesn't get re-used while we are uploading
	file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
	o.fs.putSftpConnection(&c, err)
	if err != nil {
		o.fs.putSftpConnection(&c, err)
		return fmt.Errorf("Update Create failed: %w", err)
	}
	// remove the file if upload failed
@@ -2010,14 +2058,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	}
	_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
	if err != nil {
		o.fs.putSftpConnection(&c, err)
		remove()
		return fmt.Errorf("Update ReadFrom failed: %w", err)
	}
	err = file.Close()
	if err != nil {
		o.fs.putSftpConnection(&c, err)
		remove()
		return fmt.Errorf("Update Close failed: %w", err)
	}
	// Release connection only when upload has finished so we don't upload multiple files on the same connection
	o.fs.putSftpConnection(&c, err)

	// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
	err = o.SetModTime(ctx, src.ModTime(ctx))
@@ -30,3 +30,13 @@ func TestIntegration2(t *testing.T) {
		NilObject:  (*sftp.Object)(nil),
	})
}

func TestIntegration3(t *testing.T) {
	if *fstest.RemoteName != "" {
		t.Skip("skipping as -remote is set")
	}
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestSFTPRcloneSSH:",
		NilObject:  (*sftp.Object)(nil),
	})
}
backend/sftp/ssh.go (new file, 73 lines)
@@ -0,0 +1,73 @@
//go:build !plan9
// +build !plan9

package sftp

import "io"

// Interfaces for ssh client and session implemented in ssh_internal.go and ssh_external.go

// An interface for an ssh client to abstract over internal ssh library and external binary
type sshClient interface {
	// Wait blocks until the connection has shut down, and returns the
	// error causing the shutdown.
	Wait() error

	// SendKeepAlive sends a keepalive message to keep the connection open
	SendKeepAlive()

	// Close the connection
	Close() error

	// NewSession opens a new sshSession for this sshClient. (A
	// session is a remote execution of a program.)
	NewSession() (sshSession, error)

	// CanReuse indicates if this client can be reused
	CanReuse() bool
}

// An interface for an ssh session to abstract over internal ssh library and external binary
type sshSession interface {
	// Setenv sets an environment variable that will be applied to any
	// command executed by Shell or Run.
	Setenv(name, value string) error

	// Start runs cmd on the remote host. Typically, the remote
	// server passes cmd to the shell for interpretation.
	// A Session only accepts one call to Run, Start or Shell.
	Start(cmd string) error

	// StdinPipe returns a pipe that will be connected to the
	// remote command's standard input when the command starts.
	StdinPipe() (io.WriteCloser, error)

	// StdoutPipe returns a pipe that will be connected to the
	// remote command's standard output when the command starts.
	// There is a fixed amount of buffering that is shared between
	// stdout and stderr streams. If the StdoutPipe reader is
	// not serviced fast enough it may eventually cause the
	// remote command to block.
	StdoutPipe() (io.Reader, error)

	// RequestSubsystem requests the association of a subsystem
	// with the session on the remote host. A subsystem is a
	// predefined command that runs in the background when the ssh
	// session is initiated
	RequestSubsystem(subsystem string) error

	// Run runs cmd on the remote host. Typically, the remote
	// server passes cmd to the shell for interpretation.
	// A Session only accepts one call to Run, Start, Shell, Output,
	// or CombinedOutput.
	Run(cmd string) error

	// Close the session
	Close() error

	// Set the stdout
	SetStdout(io.Writer)

	// Set the stderr
	SetStderr(io.Writer)
}
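Because both transports hide behind these small interfaces, a test double is straightforward; this mock is a hypothetical sketch for illustration (it is not part of the changeset, and would live in package sftp alongside the interfaces above):

    // mockSSHClient satisfies sshClient with no-op methods, which is
    // enough to exercise connection-pool logic without a real server.
    type mockSSHClient struct{}

    func (mockSSHClient) Wait() error                     { return nil }
    func (mockSSHClient) SendKeepAlive()                  {}
    func (mockSSHClient) Close() error                    { return nil }
    func (mockSSHClient) NewSession() (sshSession, error) { return nil, nil }
    func (mockSSHClient) CanReuse() bool                  { return true }

    // Compile-time check, mirroring the style used in this changeset.
    var _ sshClient = mockSSHClient{}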
backend/sftp/ssh_external.go (new file, 223 lines)
@@ -0,0 +1,223 @@
//go:build !plan9
// +build !plan9

package sftp

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os/exec"
	"strings"

	"github.com/rclone/rclone/fs"
)

// Implement the sshClient interface for external ssh programs
type sshClientExternal struct {
	f       *Fs
	session *sshSessionExternal
}

func (f *Fs) newSSHClientExternal() (sshClient, error) {
	return &sshClientExternal{f: f}, nil
}

// Wait for connection to close
func (s *sshClientExternal) Wait() error {
	if s.session == nil {
		return nil
	}
	return s.session.Wait()
}

// Send a keepalive over the ssh connection
func (s *sshClientExternal) SendKeepAlive() {
	// Up to the user to configure -o ServerAliveInterval=20 on their ssh connections
}

// Close the connection
func (s *sshClientExternal) Close() error {
	if s.session == nil {
		return nil
	}
	return s.session.Close()
}

// NewSession makes a new external SSH connection
func (s *sshClientExternal) NewSession() (sshSession, error) {
	session := s.f.newSshSessionExternal()
	if s.session == nil {
		fs.Debugf(s.f, "ssh external: creating additional session")
	}
	return session, nil
}

// CanReuse indicates if this client can be reused
func (s *sshClientExternal) CanReuse() bool {
	if s.session == nil {
		return true
	}
	exited := s.session.exited()
	canReuse := !exited && s.session.runningSFTP
	// fs.Debugf(s.f, "ssh external: CanReuse %v, exited=%v runningSFTP=%v", canReuse, exited, s.session.runningSFTP)
	return canReuse
}

// Check interfaces
var _ sshClient = &sshClientExternal{}
// implement the sshSession interface for external ssh binary
type sshSessionExternal struct {
	f           *Fs
	cmd         *exec.Cmd
	cancel      func()
	startCalled bool
	runningSFTP bool
}

func (f *Fs) newSshSessionExternal() *sshSessionExternal {
	s := &sshSessionExternal{
		f: f,
	}

	// Make a cancellation function for this to call in Close()
	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel

	// Connect to a remote host and request the sftp subsystem via
	// the 'ssh' command. This assumes that passwordless login is
	// correctly configured.
	ssh := append([]string(nil), s.f.opt.SSH...)
	s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)

	// Allow the command a short time only to shut down
	// FIXME enable when we get rid of go1.19
	// s.cmd.WaitDelay = time.Second

	return s
}

// Setenv sets an environment variable that will be applied to any
// command executed by Shell or Run.
func (s *sshSessionExternal) Setenv(name, value string) error {
	return errors.New("ssh external: can't set environment variables")
}

const requestSubsystem = "***Subsystem***:"

// Start runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start or Shell.
func (s *sshSessionExternal) Start(cmd string) error {
	if s.startCalled {
		return errors.New("internal error: ssh external: command already running")
	}
	s.startCalled = true

	// Adjust the args
	if strings.HasPrefix(cmd, requestSubsystem) {
		s.cmd.Args = append(s.cmd.Args, "-s", cmd[len(requestSubsystem):])
		s.runningSFTP = true
	} else {
		s.cmd.Args = append(s.cmd.Args, cmd)
		s.runningSFTP = false
	}

	fs.Debugf(s.f, "ssh external: running: %v", fs.SpaceSepList(s.cmd.Args))

	// start the process
	err := s.cmd.Start()
	if err != nil {
		return fmt.Errorf("ssh external: start process: %w", err)
	}

	return nil
}

// RequestSubsystem requests the association of a subsystem
// with the session on the remote host. A subsystem is a
// predefined command that runs in the background when the ssh
// session is initiated
func (s *sshSessionExternal) RequestSubsystem(subsystem string) error {
	return s.Start(requestSubsystem + subsystem)
}

// StdinPipe returns a pipe that will be connected to the
// remote command's standard input when the command starts.
func (s *sshSessionExternal) StdinPipe() (io.WriteCloser, error) {
	rd, err := s.cmd.StdinPipe()
	if err != nil {
		return nil, fmt.Errorf("ssh external: stdin pipe: %w", err)
	}
	return rd, nil
}

// StdoutPipe returns a pipe that will be connected to the
// remote command's standard output when the command starts.
// There is a fixed amount of buffering that is shared between
// stdout and stderr streams. If the StdoutPipe reader is
// not serviced fast enough it may eventually cause the
// remote command to block.
func (s *sshSessionExternal) StdoutPipe() (io.Reader, error) {
	wr, err := s.cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("ssh external: stdout pipe: %w", err)
	}
	return wr, nil
}

// Return whether the command has finished or not
func (s *sshSessionExternal) exited() bool {
	return s.cmd.ProcessState != nil
}

// Wait for the command to exit
func (s *sshSessionExternal) Wait() error {
	if s.exited() {
		return nil
	}
	err := s.cmd.Wait()
	if err == nil {
		fs.Debugf(s.f, "ssh external: command exited OK")
	} else {
		fs.Debugf(s.f, "ssh external: command exited with error: %v", err)
	}
	return err
}

// Run runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start, Shell, Output,
// or CombinedOutput.
func (s *sshSessionExternal) Run(cmd string) error {
	err := s.Start(cmd)
	if err != nil {
		return err
	}
	return s.Wait()
}

// Close the external ssh
func (s *sshSessionExternal) Close() error {
	fs.Debugf(s.f, "ssh external: close")
	// Cancel the context which kills the process
	s.cancel()
	// Wait for it to finish
	_ = s.Wait()
	return nil
}

// Set the stdout
func (s *sshSessionExternal) SetStdout(wr io.Writer) {
	s.cmd.Stdout = wr
}

// Set the stderr
func (s *sshSessionExternal) SetStderr(wr io.Writer) {
	s.cmd.Stderr = wr
}

// Check interfaces
var _ sshSession = &sshSessionExternal{}
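The marker-string trick above lets RequestSubsystem reuse Start. A hedged trace of what happens for the sftp subsystem, assuming the configured command is "ssh user@example.com" (hypothetical):

    // RequestSubsystem("sftp") calls Start("***Subsystem***:sftp"),
    // and Start strips the marker and appends the subsystem flags:
    cmd := requestSubsystem + "sftp" // "***Subsystem***:sftp"
    args := []string{"ssh", "user@example.com"}
    if strings.HasPrefix(cmd, requestSubsystem) {
        args = append(args, "-s", cmd[len(requestSubsystem):])
    }
    // args == ["ssh", "user@example.com", "-s", "sftp"]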
backend/sftp/ssh_internal.go (new file, 101 lines)
@@ -0,0 +1,101 @@
//go:build !plan9
// +build !plan9

package sftp

import (
	"context"
	"io"
	"net"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/proxy"
	"golang.org/x/crypto/ssh"
)

// Internal ssh connections with "golang.org/x/crypto/ssh"

type sshClientInternal struct {
	srv *ssh.Client
}

// newSSHClientInternal starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (sshClient, error) {
	baseDialer := fshttp.NewDialer(ctx)
	var (
		conn net.Conn
		err  error
	)
	if f.opt.SocksProxy != "" {
		conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
	} else {
		conn, err = baseDialer.Dial(network, addr)
	}
	if err != nil {
		return nil, err
	}
	c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
	if err != nil {
		return nil, err
	}
	fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
	srv := ssh.NewClient(c, chans, reqs)
	return sshClientInternal{srv}, nil
}

// Wait for connection to close
func (s sshClientInternal) Wait() error {
	return s.srv.Conn.Wait()
}

// Send a keepalive over the ssh connection
func (s sshClientInternal) SendKeepAlive() {
	_, _, err := s.srv.SendRequest("keepalive@openssh.com", true, nil)
	if err != nil {
		fs.Debugf(nil, "Failed to send keep alive: %v", err)
	}
}

// Close the connection
func (s sshClientInternal) Close() error {
	return s.srv.Close()
}

// CanReuse indicates if this client can be reused
func (s sshClientInternal) CanReuse() bool {
	return true
}

// Check interfaces
var _ sshClient = sshClientInternal{}
// Thin wrapper for *ssh.Session to implement sshSession interface
type sshSessionInternal struct {
	*ssh.Session
}

// Set the stdout
func (s sshSessionInternal) SetStdout(wr io.Writer) {
	s.Session.Stdout = wr
}

// Set the stderr
func (s sshSessionInternal) SetStderr(wr io.Writer) {
	s.Session.Stderr = wr
}

// NewSession makes an sshSession from an sshClient
func (s sshClientInternal) NewSession() (sshSession, error) {
	session, err := s.srv.NewSession()
	if err != nil {
		return nil, err
	}
	return sshSessionInternal{Session: session}, nil
}

// Check interfaces
var _ sshSession = sshSessionInternal{}
@@ -155,7 +155,7 @@ func init() {
				CheckAuth: checkAuth,
			})
		},
		Options: []fs.Option{{
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:    "upload_cutoff",
			Help:    "Cutoff for switching to multipart upload.",
			Default: defaultUploadCutoff,
@@ -182,6 +182,7 @@ standard values here or any folder ID (long hex number ID).`,
				Value: "top",
				Help:  "Access the home, favorites, and shared folders as well as the connectors.",
			}},
			Sensitive: true,
		}, {
			Name:    "chunk_size",
			Default: defaultChunkSize,
@@ -216,7 +217,7 @@ be set manually to something like: https://XXX.sharefile.com
				encoder.EncodeLeftSpace |
				encoder.EncodeLeftPeriod |
				encoder.EncodeInvalidUtf8),
		}},
		}}...),
	})
}
@@ -45,7 +45,8 @@ func init() {

Note that siad must run with --disable-api-security to open API port for other hosts (not recommended).
Keep default if Sia daemon runs on localhost.`,
			Default: "http://127.0.0.1:9980",
			Default: "http://127.0.0.1:9980",
			Sensitive: true,
		}, {
			Name: "api_password",
			Help: `Sia Daemon API Password.
@@ -41,13 +41,15 @@ func init() {
	NewFs: NewFs,

	Options: []fs.Option{{
		Name: "host",
		Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
		Required: true,
		Name: "host",
		Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
		Required: true,
		Sensitive: true,
	}, {
		Name: "user",
		Help: "SMB username.",
		Default: currentUser,
		Name: "user",
		Help: "SMB username.",
		Default: currentUser,
		Sensitive: true,
	}, {
		Name: "port",
		Help: "SMB port number.",
@@ -57,9 +59,10 @@ func init() {
		Help: "SMB password.",
		IsPassword: true,
	}, {
		Name: "domain",
		Help: "Domain name for NTLM authentication.",
		Default: "WORKGROUP",
		Name: "domain",
		Help: "Domain name for NTLM authentication.",
		Default: "WORKGROUP",
		Sensitive: true,
	}, {
		Name: "spn",
		Help: `Service principal name.
@@ -71,6 +74,7 @@ authentication, and it often needs to be set for clusters. For example:

Leave blank if not sure.
`,
		Sensitive: true,
	}, {
		Name:    "idle_timeout",
		Default: fs.Duration(60 * time.Second),
@@ -447,7 +451,8 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
	share, dir := f.split("/")
	if share == "" {
		return nil, fs.ErrorListBucketRequired
		// Just return empty info rather than an error if called on the root
		return &fs.Usage{}, nil
	}
	dir = f.toSambaPath(dir)

@@ -470,6 +475,45 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
	return usage, nil
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	var err error
	o := &Object{
		fs:     f,
		remote: remote,
	}
	share, filename := o.split()
	if share == "" || filename == "" {
		return nil, fs.ErrorIsDir
	}

	err = o.fs.ensureDirectory(ctx, share, filename)
	if err != nil {
		return nil, fmt.Errorf("failed to make parent directories: %w", err)
	}

	filename = o.fs.toSambaPath(filename)

	o.fs.addSession() // Show session in use
	defer o.fs.removeSession()

	cn, err := o.fs.getConnection(ctx, share)
	if err != nil {
		return nil, err
	}

	fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
	if err != nil {
		return nil, fmt.Errorf("failed to open: %w", err)
	}

	return fl, nil
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
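Implementing OpenWriterAt gives the smb backend rclone's optional OpenWriterAt feature, which multi-threaded copies use to write a file's chunks at independent offsets. A hedged usage sketch (names, sizes and offsets hypothetical):

    // Each concurrent worker writes its own region of the file.
    w, err := f.OpenWriterAt(ctx, "dir/file.bin", size)
    if err != nil {
        return err
    }
    defer w.Close()
    _, err = w.WriteAt(chunk, offset) // safe alongside writers at other offsets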
@@ -98,9 +98,10 @@ func init() {
			},
		}},
		{
			Name: "access_grant",
			Help: "Access grant.",
			Provider: "existing",
			Name: "access_grant",
			Help: "Access grant.",
			Provider: "existing",
			Sensitive: true,
		},
		{
			Name: "satellite_address",
@@ -120,14 +121,16 @@ func init() {
			},
		},
		{
			Name: "api_key",
			Help: "API key.",
			Provider: newProvider,
			Name: "api_key",
			Help: "API key.",
			Provider: newProvider,
			Sensitive: true,
		},
		{
			Name: "passphrase",
			Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
			Provider: newProvider,
			Name: "passphrase",
			Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
			Provider: newProvider,
			Sensitive: true,
		},
	},
})
@@ -132,42 +132,50 @@ func init() {
		}
		return nil, fmt.Errorf("unknown state %q", config.State)
	}, Options: []fs.Option{{
		Name: "app_id",
		Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
		Name: "app_id",
		Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
		Sensitive: true,
	}, {
		Name: "access_key_id",
		Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
		Name: "access_key_id",
		Help: "Sugarsync Access Key ID.\n\nLeave blank to use rclone's.",
		Sensitive: true,
	}, {
		Name: "private_access_key",
		Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
		Name: "private_access_key",
		Help: "Sugarsync Private Access Key.\n\nLeave blank to use rclone's.",
		Sensitive: true,
	}, {
		Name:    "hard_delete",
		Help:    "Permanently delete files if true\notherwise put them in the deleted files.",
		Default: false,
	}, {
		Name: "refresh_token",
		Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Name: "refresh_token",
		Help: "Sugarsync refresh token.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Sensitive: true,
	}, {
		Name: "authorization",
		Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Name: "authorization",
		Help: "Sugarsync authorization.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Sensitive: true,
	}, {
		Name:     "authorization_expiry",
		Help:     "Sugarsync authorization expiry.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
	}, {
		Name: "user",
		Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Name: "user",
		Help: "Sugarsync user.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Sensitive: true,
	}, {
		Name: "root_id",
		Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Name: "root_id",
		Help: "Sugarsync root id.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Sensitive: true,
	}, {
		Name: "deleted_id",
		Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Name: "deleted_id",
		Help: "Sugarsync deleted folder id.\n\nLeave blank normally, will be auto configured by rclone.",
		Advanced: true,
		Sensitive: true,
	}, {
		Name: config.ConfigEncoding,
		Help: config.ConfigEncodingHelp,
@@ -116,11 +116,13 @@ func init() {
		},
	},
}, {
	Name: "user",
	Help: "User name to log in (OS_USERNAME).",
	Name: "user",
	Help: "User name to log in (OS_USERNAME).",
	Sensitive: true,
}, {
	Name: "key",
	Help: "API key or password (OS_PASSWORD).",
	Name: "key",
	Help: "API key or password (OS_PASSWORD).",
	Sensitive: true,
}, {
	Name: "auth",
	Help: "Authentication URL for server (OS_AUTH_URL).",
@@ -147,20 +149,25 @@ func init() {
		Help: "Blomp Cloud Storage",
	}},
}, {
	Name: "user_id",
	Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
	Name: "user_id",
	Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
	Sensitive: true,
}, {
	Name: "domain",
	Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
	Name: "domain",
	Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
	Sensitive: true,
}, {
	Name: "tenant",
	Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
	Name: "tenant",
	Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
	Sensitive: true,
}, {
	Name: "tenant_id",
	Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
	Name: "tenant_id",
	Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
	Sensitive: true,
}, {
	Name: "tenant_domain",
	Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
	Name: "tenant_domain",
	Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
	Sensitive: true,
}, {
	Name: "region",
	Help: "Region name - optional (OS_REGION_NAME).",
@@ -168,17 +175,21 @@ func init() {
	Name: "storage_url",
	Help: "Storage URL - optional (OS_STORAGE_URL).",
}, {
	Name: "auth_token",
	Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
	Name: "auth_token",
	Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
	Sensitive: true,
}, {
	Name: "application_credential_id",
	Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
	Name: "application_credential_id",
	Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
	Sensitive: true,
}, {
	Name: "application_credential_name",
	Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
	Name: "application_credential_name",
	Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
	Sensitive: true,
}, {
	Name: "application_credential_secret",
	Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
	Name: "application_credential_secret",
	Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
	Sensitive: true,
}, {
	Name: "auth_version",
	Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).",
@@ -43,8 +43,9 @@ func init() {
		Description: "Uptobox",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
			Name: "access_token",
			Help: "Your access token.\n\nGet it from https://uptobox.com/my_account.",
			Name: "access_token",
			Sensitive: true,
		}, {
			Help: "Set to make uploaded files private",
			Name: "private",
@@ -14,7 +14,6 @@ import (
	"io"
	"net/http"
	"path"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/readers"
@@ -41,10 +40,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
	return
}

func (f *Fs) getChunksUploadURL() string {
	return strings.Replace(f.endpointURL, "/dav/files/", "/dav/uploads/", 1)
}

func (o *Object) getChunksUploadDir() (string, error) {
	hasher := md5.New()
	_, err := hasher.Write([]byte(o.filePath()))
@@ -55,12 +50,16 @@ func (o *Object) getChunksUploadDir() (string, error) {
	return uploadDir, nil
}

func (f *Fs) verifyChunkConfig() error {
	if f.opt.ChunkSize != 0 && !validateNextCloudChunkedURL.MatchString(f.endpointURL) {
		return errors.New("chunked upload with nextcloud must use /dav/files/USER endpoint not /webdav")
func (f *Fs) getChunksUploadURL() (string, error) {
	submatch := nextCloudURLRegex.FindStringSubmatch(f.endpointURL)
	if submatch == nil {
		return "", errors.New("the remote url looks incorrect. Note that nextcloud chunked uploads require you to use the /dav/files/USER endpoint instead of /webdav. Please check 'rclone config show remotename' to verify that the url field ends in /dav/files/USERNAME")
	}

	return nil
	baseURL, user := submatch[1], submatch[2]
	chunksUploadURL := fmt.Sprintf("%s/dav/uploads/%s/", baseURL, user)

	return chunksUploadURL, nil
}

func (o *Object) shouldUseChunkedUpload(src fs.ObjectInfo) bool {
@@ -122,7 +121,7 @@ func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, pa

	getBody := func() (io.ReadCloser, error) {
		// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
		if _, err := in.Seek(0, io.SeekStart); err == nil {
		if _, err := in.Seek(0, io.SeekStart); err != nil {
			return nil, err
		}

@@ -204,7 +203,7 @@ func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) erro
	resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)

	// directory doesn't exist, no need to purge
	if resp.StatusCode == http.StatusNotFound {
	if resp != nil && resp.StatusCode == http.StatusNotFound {
		return false, nil
	}
@@ -96,15 +96,17 @@ func init() {
			Help: "Other site/service or software",
		}},
	}, {
		Name: "user",
		Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
		Name: "user",
		Help: "User name.\n\nIn case NTLM authentication is used, the username should be in the format 'Domain\\User'.",
		Sensitive: true,
	}, {
		Name: "pass",
		Help: "Password.",
		IsPassword: true,
	}, {
		Name: "bearer_token",
		Help: "Bearer token instead of user/pass (e.g. a Macaroon).",
		Name: "bearer_token",
		Help: "Bearer token instead of user/pass (e.g. a Macaroon).",
		Sensitive: true,
	}, {
		Name: "bearer_token_command",
		Help: "Command to run to get a bearer token.",
@@ -569,7 +571,8 @@ func (f *Fs) fetchAndSetBearerToken() error {
	return nil
}

var validateNextCloudChunkedURL = regexp.MustCompile(`^.*/dav/files/[^/]+/?$`)
// The WebDAV url can optionally be suffixed with a path. This suffix needs to be ignored for determining the temporary upload directory of chunks.
var nextCloudURLRegex = regexp.MustCompile(`^(.*)/dav/files/([^/]+)`)

// setQuirks adjusts the Fs for the vendor passed in
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
@@ -592,11 +595,18 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
		f.propsetMtime = true
		f.hasOCSHA1 = true
		f.canChunk = true
		if err := f.verifyChunkConfig(); err != nil {
			return err

		if f.opt.ChunkSize == 0 {
			fs.Logf(nil, "Chunked uploads are disabled because nextcloud_chunk_size is set to 0")
		} else {
			chunksUploadURL, err := f.getChunksUploadURL()
			if err != nil {
				return err
			}

			f.chunksUploadURL = chunksUploadURL
			fs.Logf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
		}
		f.chunksUploadURL = f.getChunksUploadURL()
		fs.Logf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
	case "sharepoint":
		// To mount sharepoint, two Cookies are required
		// They have to be set instead of BasicAuth
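A worked example of what the new nextCloudURLRegex extracts, using a hypothetical endpoint URL:

    m := nextCloudURLRegex.FindStringSubmatch("https://nc.example.com/dav/files/alice/photos")
    // m[1] == "https://nc.example.com" (base URL), m[2] == "alice" (user)
    url := fmt.Sprintf("%s/dav/uploads/%s/", m[1], m[2])
    // url == "https://nc.example.com/dav/uploads/alice/"
    // i.e. the trailing "/photos" path suffix is ignored, as the
    // comment on the regexp describes.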
@@ -66,6 +66,7 @@ docs = [
    "pcloud.md",
    "pikpak.md",
    "premiumizeme.md",
    "protondrive.md",
    "putio.md",
    "seafile.md",
    "sftp.md",
@@ -114,7 +115,7 @@ commands_order = [
ignore_docs = [
    "downloads.md",
    "privacy.md",
    "donate.md",
    "sponsor.md",
]

def read_doc(doc):
@@ -22,8 +22,8 @@ var (
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
	flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable")
	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "")
	flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable", "")
}

// printValue formats uv to be output
@@ -95,6 +95,7 @@ see complete list in [documentation](https://rclone.org/overview/#optional-features)
`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.41",
		// "groups": "",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
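Every flag-registration call in these cmd/ hunks gains one trailing string argument. Judging from cmd/cmd.go below, where the values are "Debugging", "Logging" and "Config", it appears to name the option group a flag belongs to (empty for ungrouped flags); the matching "groups" command annotations support that reading. A hedged sketch of the extended helper signature implied by these call sites, not the actual flags package source:

    // BoolVarP registers a bool flag; the final groups argument is
    // presumably recorded so help output can list flags by group.
    func BoolVarP(flagSet *pflag.FlagSet, p *bool, name, shorthand string,
        value bool, usage string, groups string) {
        flagSet.BoolVarP(p, name, shorthand, value, usage)
        // groups would be stored in some registry keyed by flag name
    }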
@@ -18,8 +18,8 @@ var (
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
	flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses")
	flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser", "")
	flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses", "")
}

var commandDefinition = &cobra.Command{
@@ -36,6 +36,7 @@ link in default browser automatically.
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.27",
		// "groups": "",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(1, 3, command, args)
@@ -24,8 +24,8 @@ var (
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name")
	flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format")
	flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name", "")
	flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format", "")
}

var commandDefinition = &cobra.Command{
@@ -60,6 +60,7 @@ Note to run these commands on a running backend then see
`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.52",
		"groups": "Important",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 1e6, command, args)
@@ -98,16 +98,16 @@ var Opt Options
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.")
	flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."))
	flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"))
	flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose")
	flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)")
	flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove empty directories at the final cleanup step.")
	flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file")
	flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"))
	flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)")
	flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).")
	flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
	flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
	flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
	flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "")
	flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)", "")
	flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove empty directories at the final cleanup step.", "")
	flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
	flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
	flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
	flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
}

// bisync command definition
@@ -117,6 +117,7 @@ var commandDefinition = &cobra.Command{
	Long: longHelp,
	Annotations: map[string]string{
		"versionIntroduced": "v1.58",
		"groups": "Filter,Copy,Important",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 2, command, args)
@@ -27,12 +27,12 @@ var (
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters")
	flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters")
	flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve)")
	flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters")
	flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing")
	flags.StringVarP(cmdFlags, &separator, "separator", "", separator, "Separator to use between objects when printing multiple files")
	flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters", "")
	flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters", "")
	flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve)", "")
	flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters", "")
	flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing", "")
	flags.StringVarP(cmdFlags, &separator, "separator", "", separator, "Separator to use between objects when printing multiple files", "")
}

var commandDefinition = &cobra.Command{
@@ -73,6 +73,7 @@ files, use:
`, "|", "`"),
	Annotations: map[string]string{
		"versionIntroduced": "v1.33",
		"groups": "Filter,Listing",
	},
	Run: func(command *cobra.Command, args []string) {
		usedOffset := offset != 0 || count >= 0
@@ -33,20 +33,20 @@ var (
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash")
	flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type")
	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash", "")
	flags.StringVarP(cmdFlags, &checkFileHashType, "checkfile", "C", checkFileHashType, "Treat source:path as a SUM file with hashes of given type", "")
	AddFlags(cmdFlags)
}

// AddFlags adds the check flags to the cmdFlags command
func AddFlags(cmdFlags *pflag.FlagSet) {
	flags.BoolVarP(cmdFlags, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
	flags.StringVarP(cmdFlags, &combined, "combined", "", combined, "Make a combined report of changes to this file")
	flags.StringVarP(cmdFlags, &missingOnSrc, "missing-on-src", "", missingOnSrc, "Report all files missing from the source to this file")
	flags.StringVarP(cmdFlags, &missingOnDst, "missing-on-dst", "", missingOnDst, "Report all files missing from the destination to this file")
	flags.StringVarP(cmdFlags, &match, "match", "", match, "Report all matching files to this file")
	flags.StringVarP(cmdFlags, &differ, "differ", "", differ, "Report all non-matching files to this file")
	flags.StringVarP(cmdFlags, &errFile, "error", "", errFile, "Report all files with errors (hashing or reading) to this file")
	flags.BoolVarP(cmdFlags, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote", "")
	flags.StringVarP(cmdFlags, &combined, "combined", "", combined, "Make a combined report of changes to this file", "")
	flags.StringVarP(cmdFlags, &missingOnSrc, "missing-on-src", "", missingOnSrc, "Report all files missing from the source to this file", "")
	flags.StringVarP(cmdFlags, &missingOnDst, "missing-on-dst", "", missingOnDst, "Report all files missing from the destination to this file", "")
	flags.StringVarP(cmdFlags, &match, "match", "", match, "Report all matching files to this file", "")
	flags.StringVarP(cmdFlags, &differ, "differ", "", differ, "Report all non-matching files to this file", "")
	flags.StringVarP(cmdFlags, &errFile, "error", "", errFile, "Report all files with errors (hashing or reading) to this file", "")
}

// FlagsHelp describes the flags for the help
@@ -158,6 +158,9 @@ to check all the data.
If you supply the |--checkfile HASH| flag with a valid hash name,
the |source:path| must point to a text file in the SUM format.
`, "|", "`") + FlagsHelp,
	Annotations: map[string]string{
		"groups": "Filter,Listing,Check",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(2, 2, command, args)
		var (
@@ -19,7 +19,7 @@ var download = false
func init() {
	cmd.Root.AddCommand(commandDefinition)
	cmdFlags := commandDefinition.Flags()
	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents")
	flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by hashing the contents", "")
	check.AddFlags(cmdFlags)
}

@@ -39,6 +39,7 @@ Note that hash values in the SUM file are treated as case insensitive.
`, "|", "`") + check.FlagsHelp,
	Annotations: map[string]string{
		"versionIntroduced": "v1.56",
		"groups": "Filter,Listing",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(3, 3, command, args)
@@ -22,6 +22,7 @@ versions. Not supported by all remotes.
`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.31",
		"groups": "Important",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
cmd/cmd.go (15 lines changed)
@@ -35,6 +35,7 @@ import (
|
||||
fslog "github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/fs/rc/rcflags"
|
||||
"github.com/rclone/rclone/fs/rc/rcserver"
|
||||
fssync "github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/buildinfo"
|
||||
"github.com/rclone/rclone/lib/exitcode"
|
||||
@@ -47,13 +48,13 @@ import (
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
|
||||
memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file")
|
||||
statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable)")
|
||||
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second")
|
||||
cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file", "Debugging")
|
||||
memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file", "Debugging")
|
||||
statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g. 500ms, 60s, 5m (0 to disable)", "Logging")
|
||||
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second", "Logging")
|
||||
version bool
|
||||
retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
|
||||
retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable)")
|
||||
retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail", "Config")
|
||||
retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable)", "Config")
|
||||
// Errors
|
||||
errorCommandNotFound = errors.New("command not found")
|
||||
errorUncategorized = errors.New("uncategorized error")
|
||||
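
The hunks above show the calling convention this branch introduces: every registration helper in rclone's fs/config/flags package gains a final argument naming the flag's group ("Debugging", "Logging", "Config", ...), with "" used where a flag belongs to no named group. A minimal sketch of registering flags under the new signatures; the flag names and the standalone flag set here are illustrative, not part of the diff:

// Sketch: one grouped and one ungrouped flag under the new API.
package main

import (
	"github.com/rclone/rclone/fs/config/flags"
	"github.com/spf13/pflag"
)

var (
	statsUnit = "bytes"
	demo      bool
)

func main() {
	flagSet := pflag.NewFlagSet("example", pflag.ExitOnError)
	// Grouped: listed under "Logging" on help and docs pages.
	flags.StringVarP(flagSet, &statsUnit, "stats-unit", "", statsUnit,
		"Show data rate in stats as either 'bits' or 'bytes' per second", "Logging")
	// Ungrouped: the empty group string keeps the flag out of every group.
	flags.BoolVarP(flagSet, &demo, "demo", "", demo,
		"Illustrative flag for this sketch", "")
}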

@@ -501,6 +502,8 @@ func resolveExitCode(err error) {
os.Exit(exitcode.UncategorizedError)
case errors.Is(err, accounting.ErrorMaxTransferLimitReached):
os.Exit(exitcode.TransferExceeded)
case errors.Is(err, fssync.ErrorMaxDurationReached):
os.Exit(exitcode.DurationExceeded)
case fserrors.ShouldRetry(err):
os.Exit(exitcode.RetryError)
case fserrors.IsNoRetryError(err), fserrors.IsNoLowLevelRetryError(err):
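
The resolveExitCode hunk wires a new sentinel error, fssync.ErrorMaxDurationReached, to its own process exit code. For readers unfamiliar with the pattern, a self-contained sketch; the sentinel name and the numeric exit code below are stand-ins, not rclone's values:

package main

import (
	"errors"
	"fmt"
	"os"
)

var errMaxDuration = errors.New("max duration reached") // stand-in for fssync.ErrorMaxDurationReached

// resolve maps a (possibly wrapped) error to an exit code, the same
// errors.Is pattern the hunk above extends.
func resolve(err error) int {
	switch {
	case err == nil:
		return 0
	case errors.Is(err, errMaxDuration):
		return 10 // stand-in for exitcode.DurationExceeded
	default:
		return 1
	}
}

func main() {
	err := fmt.Errorf("sync aborted: %w", errMaxDuration)
	os.Exit(resolve(err)) // exits 10: errors.Is sees through the %w wrapping
}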

@@ -26,6 +26,7 @@ func init() {
configCommand.AddCommand(configTouchCommand)
configCommand.AddCommand(configPathsCommand)
configCommand.AddCommand(configShowCommand)
configCommand.AddCommand(configRedactedCommand)
configCommand.AddCommand(configDumpCommand)
configCommand.AddCommand(configProvidersCommand)
configCommand.AddCommand(configCreateCommand)

@@ -60,7 +61,10 @@ var configEditCommand = &cobra.Command{
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
Run: configCommand.Run,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
return config.EditConfig(context.Background())
},
}

var configFileCommand = &cobra.Command{

@@ -118,6 +122,35 @@ var configShowCommand = &cobra.Command{
},
}

var configRedactedCommand = &cobra.Command{
Use: "redacted [<remote>]",
Short: `Print redacted (decrypted) config file, or the redacted config for a single remote.`,
Long: `This prints a redacted copy of the config file, either the
whole config file or for a given remote.

The config file will be redacted by replacing all passwords and other
sensitive info with XXX.

This makes the config file suitable for posting online for support.

It should be double checked before posting as the redaction may not be perfect.

`,
Annotations: map[string]string{
"versionIntroduced": "v1.64",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if len(args) == 0 {
config.ShowRedactedConfig()
} else {
name := strings.TrimRight(args[0], ":")
config.ShowRedactedRemote(name)
}
fmt.Println("### Double check the config for sensitive info before posting publicly")
},
}

var configDumpCommand = &cobra.Command{
Use: "dump",
Short: `Dump the config file as JSON.`,
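
As a usage note for the new subcommand above: run it with no arguments to redact the whole config, or name a single remote (the trailing colon is stripped by the TrimRight call), e.g.

    rclone config redacted
    rclone config redacted remote:

Both forms print the final warning line asking you to double check the output before posting it publicly.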

@@ -288,13 +321,13 @@ func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.Co

func init() {
for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions")
flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue")
flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer", "Config")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions", "Config")
flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue", "Config")
flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue", "Config")
}
}

@@ -450,7 +483,7 @@ var (
)

func init() {
flags.BoolVarP(configUserInfoCommand.Flags(), &jsonOutput, "json", "", false, "Format output as JSON")
flags.BoolVarP(configUserInfoCommand.Flags(), &jsonOutput, "json", "", false, "Format output as JSON", "")
}

var configUserInfoCommand = &cobra.Command{

@@ -19,7 +19,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy", "")
}

var commandDefinition = &cobra.Command{

@@ -83,6 +83,9 @@ recently very efficiently like this:

**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything.
`, "|", "`"),
Annotations: map[string]string{
"groups": "Copy,Filter,Listing,Important",
},
Run: func(command *cobra.Command, args []string) {

cmd.CheckArgs(2, 2, command, args)

@@ -48,6 +48,7 @@ the destination.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.35",
"groups": "Copy,Filter,Listing,Important",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

@@ -25,11 +25,11 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path")
flags.BoolVarP(cmdFlags, &headerFilename, "header-filename", "", headerFilename, "Get the file name from the Content-Disposition header")
flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file")
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the URL and use it for destination file path", "")
flags.BoolVarP(cmdFlags, &headerFilename, "header-filename", "", headerFilename, "Get the file name from the Content-Disposition header", "")
flags.BoolVarP(cmdFlags, &printFilename, "print-filename", "p", printFilename, "Print the resulting name from --auto-filename", "")
flags.BoolVarP(cmdFlags, &noClobber, "no-clobber", "", noClobber, "Prevent overwriting file with same name", "")
flags.BoolVarP(cmdFlags, &stdout, "stdout", "", stdout, "Write the output to stdout rather than a file", "")
}

var commandDefinition = &cobra.Command{

@@ -53,6 +53,7 @@ will cause the output to be written to standard output.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.43",
"groups": "Important",
},
RunE: func(command *cobra.Command, args []string) (err error) {
cmd.CheckArgs(1, 2, command, args)

@@ -49,6 +49,7 @@ After it has run it will log the status of the encryptedremote:.
` + check.FlagsHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.36",
"groups": "Filter,Listing,Check",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

@@ -20,7 +20,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames")
flags.BoolVarP(cmdFlags, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames", "")
}

var commandDefinition = &cobra.Command{

@@ -20,8 +20,8 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlag := commandDefinition.Flags()
flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename")
flags.BoolVarP(cmdFlag, &byHash, "by-hash", "", false, "Find identical hashes rather than names")
flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename", "")
flags.BoolVarP(cmdFlag, &byHash, "by-hash", "", false, "Find identical hashes rather than names", "")
}

var commandDefinition = &cobra.Command{

@@ -137,6 +137,7 @@ Or
`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
"groups": "Important",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 2, command, args)

@@ -18,7 +18,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &rmdirs, "rmdirs", "", rmdirs, "rmdirs removes empty directories but leaves root intact")
flags.BoolVarP(cmdFlags, &rmdirs, "rmdirs", "", rmdirs, "rmdirs removes empty directories but leaves root intact", "")
}

var commandDefinition = &cobra.Command{

@@ -55,6 +55,7 @@ delete all files bigger than 100 MiB.
`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.27",
"groups": "Important,Filter,Listing",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)

@@ -25,6 +25,7 @@ it will always be removed.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.42",
"groups": "Important",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)

cmd/genautocomplete/genautocomplete_powershell.go (new file)

@@ -0,0 +1,44 @@
package genautocomplete

import (
"log"
"os"

"github.com/rclone/rclone/cmd"
"github.com/spf13/cobra"
)

func init() {
completionDefinition.AddCommand(powershellCommandDefinition)
}

var powershellCommandDefinition = &cobra.Command{
Use: "powershell [output_file]",
Short: `Output powershell completion script for rclone.`,
Long: `
Generate the autocompletion script for powershell.

To load completions in your current shell session:

rclone completion powershell | Out-String | Invoke-Expression

To load completions for every new session, add the output of the above command
to your powershell profile.

If output_file is "-" or missing, then the output will be written to stdout.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if len(args) == 0 || (len(args) > 0 && args[0] == "-") {
err := cmd.Root.GenPowerShellCompletion(os.Stdout)
if err != nil {
log.Fatal(err)
}
return
}
err := cmd.Root.GenPowerShellCompletionFile(args[0])
if err != nil {
log.Fatal(err)
}
},
}

@@ -3,6 +3,7 @@ package gendocs

import (
"bytes"
"fmt"
"log"
"os"
"path"

@@ -13,10 +14,10 @@ import (
"time"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/lib/file"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"github.com/spf13/pflag"
)

func init() {

@@ -88,6 +89,7 @@ rclone.org website.`,
Annotations map[string]string
}
var commands = map[string]commandDetails{}
var aliases []string
var addCommandDetails func(root *cobra.Command)
addCommandDetails = func(root *cobra.Command) {
name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"

@@ -95,6 +97,7 @@ rclone.org website.`,
Short: root.Short,
Annotations: root.Annotations,
}
aliases = append(aliases, root.Aliases...)
for _, c := range root.Commands() {
addCommandDetails(c)
}

@@ -126,10 +129,6 @@ rclone.org website.`,
return "/commands/" + strings.ToLower(base) + "/"
}

// Hide all of the root entries flags
cmd.Root.Flags().VisitAll(func(flag *pflag.Flag) {
flag.Hidden = true
})
err = doc.GenMarkdownTreeCustom(cmd.Root, out, prepender, linkHandler)
if err != nil {
return err

@@ -143,15 +142,50 @@ rclone.org website.`,
return err
}
if !info.IsDir() {
name := filepath.Base(path)
cmd, ok := commands[name]
if !ok {
// Avoid man pages which are for aliases. This is a bit messy!
for _, alias := range aliases {
if strings.Contains(name, alias) {
return nil
}
}
return fmt.Errorf("didn't find command for %q", name)
}
b, err := os.ReadFile(path)
if err != nil {
return err
}
doc := string(b)
doc = strings.Replace(doc, "\n### SEE ALSO", `

var out strings.Builder
if groupsString := cmd.Annotations["groups"]; groupsString != "" {
groups := flags.All.Include(groupsString)
for _, group := range groups.Groups {
if group.Flags.HasFlags() {
_, _ = fmt.Fprintf(&out, "\n### %s Options\n\n", group.Name)
_, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
_, _ = fmt.Fprintln(&out, "```")
_, _ = out.WriteString(group.Flags.FlagUsages())
_, _ = fmt.Fprintln(&out, "```")
}
}
}
_, _ = out.WriteString(`
See the [global flags page](/flags/) for global options not listed here.

### SEE ALSO`, 1)
`)

startCut := strings.Index(doc, `### Options inherited from parent commands`)
endCut := strings.Index(doc, `## SEE ALSO`)
if startCut < 0 || endCut < 0 {
if name == "rclone.md" {
return nil
}
return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
}
doc = doc[:startCut] + out.String() + doc[endCut:]
// outdent all the titles by one
doc = outdentTitle.ReplaceAllString(doc, `$1`)
err = os.WriteFile(path, []byte(doc), 0777)
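
To make the data flow above concrete, here is a compact sketch of how a command's "groups" annotation becomes per-group markdown sections, using only the calls visible in the hunk (flags.All.Include, Groups, HasFlags, FlagUsages). Note the group registry is only populated once rclone's flag-registering packages have been imported, so as a standalone program this prints nothing interesting:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/flags"
)

func main() {
	// e.g. a command annotated with Annotations["groups"] = "Filter,Listing"
	groups := flags.All.Include("Filter,Listing")
	for _, group := range groups.Groups {
		if group.Flags.HasFlags() {
			// One "### <Name> Options" section per non-empty group,
			// mirroring what gendocs splices into each command's page.
			fmt.Printf("### %s Options\n\n%s\n\n", group.Name, group.Help)
			fmt.Print(group.Flags.FlagUsages())
		}
	}
}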

@@ -31,10 +31,10 @@ func init() {

// AddHashsumFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
func AddHashsumFlags(cmdFlags *pflag.FlagSet) {
flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum")
flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal")
flags.StringVarP(cmdFlags, &ChecksumFile, "checkfile", "C", ChecksumFile, "Validate hashes against a given SUM file instead of printing them")
flags.BoolVarP(cmdFlags, &DownloadFlag, "download", "", DownloadFlag, "Download the file and hash it locally; if this flag is not specified, the hash is requested from the remote")
flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum", "")
flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal", "")
flags.StringVarP(cmdFlags, &ChecksumFile, "checkfile", "C", ChecksumFile, "Validate hashes against a given SUM file instead of printing them", "")
flags.BoolVarP(cmdFlags, &DownloadFlag, "download", "", DownloadFlag, "Download the file and hash it locally; if this flag is not specified, the hash is requested from the remote", "")
}

// GetHashsumOutput opens and closes the output file when using the output-file flag

@@ -114,6 +114,7 @@ Note that hash names are case insensitive and values are output in lower case.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
"groups": "Filter,Listing",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 2, command, args)

cmd/help.go

@@ -11,6 +11,7 @@ import (

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configflags"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/filter/filterflags"
"github.com/rclone/rclone/fs/log/logflags"
"github.com/rclone/rclone/fs/rc/rcflags"

@@ -113,7 +114,7 @@ var helpFlags = &cobra.Command{
Short: "Show the global flags for rclone",
Run: func(command *cobra.Command, args []string) {
if len(args) > 0 {
re, err := regexp.Compile(args[0])
re, err := regexp.Compile(`(?i)` + args[0])
if err != nil {
log.Fatalf("Failed to compile flags regexp: %v", err)
}

@@ -181,7 +182,7 @@ func setupRootCommand(rootCmd *cobra.Command) {
Root.Flags().BoolVarP(&version, "version", "V", false, "Print the version number")

cobra.AddTemplateFunc("showGlobalFlags", func(cmd *cobra.Command) bool {
return cmd.CalledAs() == "flags"
return cmd.CalledAs() == "flags" || cmd.Annotations["groups"] != ""
})
cobra.AddTemplateFunc("showCommands", func(cmd *cobra.Command) bool {
return cmd.CalledAs() != "flags"

@@ -191,15 +192,21 @@ func setupRootCommand(rootCmd *cobra.Command) {
// "rclone help" (which shows the global help)
return cmd.CalledAs() != "rclone" && cmd.CalledAs() != ""
})
cobra.AddTemplateFunc("backendFlags", func(cmd *cobra.Command, include bool) *pflag.FlagSet {
backendFlagSet := pflag.NewFlagSet("Backend Flags", pflag.ExitOnError)
cobra.AddTemplateFunc("flagGroups", func(cmd *cobra.Command) []*flags.Group {
// Add the backend flags and check all flags
backendGroup := flags.All.NewGroup("Backend", "Backend only flags. These can be set in the config file also.")
allRegistered := flags.All.AllRegistered()
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
matched := flagsRe == nil || flagsRe.MatchString(flag.Name)
if _, ok := backendFlags[flag.Name]; matched && ok == include {
backendFlagSet.AddFlag(flag)
if _, ok := backendFlags[flag.Name]; ok {
backendGroup.Add(flag)
} else if _, ok := allRegistered[flag]; ok {
// flag is in a group already
} else {
fs.Errorf(nil, "Flag --%s is unknown", flag.Name)
}
})
return backendFlagSet
groups := flags.All.Filter(flagsRe).Include(cmd.Annotations["groups"])
return groups.Groups
})
rootCmd.SetUsageTemplate(usageTemplate)
// rootCmd.SetHelpTemplate(helpTemplate)
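
One small behaviour change above is easy to miss: the flags help now prepends `(?i)`, Go's case-insensitive regexp flag, to the user's pattern. A tiny illustration; the pattern and flag name below are made up:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`(?i)` + "drive")       // as compiled by the new code
	fmt.Println(re.MatchString("--fake-Drive-flag")) // true; without (?i) this would be false
}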

@@ -233,11 +240,13 @@ Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "he
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if and (showGlobalFlags .) .HasAvailableInheritedFlags}}

Global Flags:
{{(backendFlags . false).FlagUsages | trimTrailingWhitespaces}}
{{ range flagGroups . }}{{ if .Flags.HasFlags }}
# {{ .Name }} Flags

Backend Flags:
{{(backendFlags . true).FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
{{ .Help }}

{{ .Flags.FlagUsages | trimTrailingWhitespaces}}
{{ end }}{{ end }}

Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}

@@ -255,24 +264,18 @@ description: "Rclone Global Flags"
# Global Flags

This describes the global flags available to every rclone command
split into two groups, non backend and backend flags.
split into groups.

## Non Backend Flags
{{ range flagGroups . }}{{ if .Flags.HasFlags }}
## {{ .Name }}

These flags are available for every command.
{{ .Help }}

` + "```" + `
{{(backendFlags . false).FlagUsages | trimTrailingWhitespaces}}
{{ .Flags.FlagUsages | trimTrailingWhitespaces}}
` + "```" + `

## Backend Flags

These flags are available for every command. They control the backends
and may be set in the config file.

` + "```" + `
{{(backendFlags . true).FlagUsages | trimTrailingWhitespaces}}
` + "```" + `
{{ end }}{{ end }}
`

// show all the backends

@@ -20,8 +20,8 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.FVarP(cmdFlags, &expire, "expire", "", "The amount of time that the link will be valid")
flags.BoolVarP(cmdFlags, &unlink, "unlink", "", unlink, "Remove existing public link to file/folder")
flags.FVarP(cmdFlags, &expire, "expire", "", "The amount of time that the link will be valid", "")
flags.BoolVarP(cmdFlags, &unlink, "unlink", "", unlink, "Remove existing public link to file/folder", "")
}

var commandDefinition = &cobra.Command{

@@ -19,7 +19,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names")
flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names", "")
}

var commandDefinition = &cobra.Command{

@@ -31,6 +31,9 @@ Eg
37600 fubuwic

` + lshelp.Help,
Annotations: map[string]string{
"groups": "Filter,Listing",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)

@@ -20,7 +20,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing")
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing", "")
}

var commandDefinition = &cobra.Command{

@@ -49,6 +49,9 @@ Or
If you just want the directory names use ` + "`rclone lsf --dirs-only`" + `.

` + lshelp.Help,
Annotations: map[string]string{
"groups": "Filter,Listing",
},
Run: func(command *cobra.Command, args []string) {
ci := fs.GetConfig(context.Background())
cmd.CheckArgs(1, 1, command, args)

@@ -31,15 +31,15 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details")
flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format")
flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names")
flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash")
flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files")
flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories")
flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format")
flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names")
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing")
flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details", "")
flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format", "")
flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names", "")
flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash", "")
flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files", "")
flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories", "")
flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format", "")
flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names", "")
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing", "")
}

var commandDefinition = &cobra.Command{

@@ -144,6 +144,7 @@ those only (without traversing the whole directory structure):
` + lshelp.Help,
Annotations: map[string]string{
"versionIntroduced": "v1.40",
"groups": "Filter,Listing",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)

@@ -23,17 +23,17 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing")
flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer)")
flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up)")
flags.BoolVarP(cmdFlags, &opt.NoMimeType, "no-mimetype", "", false, "Don't read the mime type (can speed things up)")
flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "", false, "Show the encrypted names")
flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object")
flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing")
flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing")
flags.BoolVarP(cmdFlags, &opt.Metadata, "metadata", "M", false, "Add metadata to the listing")
flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated)")
flags.BoolVarP(cmdFlags, &statOnly, "stat", "", false, "Just return the info for the pointed to file")
flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing", "")
flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer)", "")
flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up)", "")
flags.BoolVarP(cmdFlags, &opt.NoMimeType, "no-mimetype", "", false, "Don't read the mime type (can speed things up)", "")
flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "", false, "Show the encrypted names", "")
flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object", "")
flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing", "")
flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing", "")
flags.BoolVarP(cmdFlags, &opt.Metadata, "metadata", "M", false, "Add metadata to the listing", "")
flags.StringArrayVarP(cmdFlags, &opt.HashTypes, "hash-type", "", nil, "Show only this hash type (may be repeated)", "")
flags.BoolVarP(cmdFlags, &statOnly, "stat", "", false, "Just return the info for the pointed to file", "")
}

var commandDefinition = &cobra.Command{

@@ -114,6 +114,7 @@ can be processed line by line as each item is written one to a line.
` + lshelp.Help,
Annotations: map[string]string{
"versionIntroduced": "v1.37",
"groups": "Filter,Listing",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)

@@ -33,6 +33,7 @@ Eg
` + lshelp.Help,
Annotations: map[string]string{
"versionIntroduced": "v1.02",
"groups": "Filter,Listing",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)

@@ -40,6 +40,7 @@ as a relative path).
`,
Annotations: map[string]string{
"versionIntroduced": "v1.02",
"groups": "Filter,Listing",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 1, command, args)

@@ -18,6 +18,9 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "mkdir remote:path",
Short: `Make the path if it doesn't already exist.`,
Annotations: map[string]string{
"groups": "Important",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)

@@ -4,22 +4,20 @@
package mountlib

import (
"errors"
"fmt"
"path/filepath"
"strings"
"time"

"github.com/artyom/mtab"
"github.com/moby/sys/mountinfo"
)

const (
mtabPath = "/proc/mounts"
pollInterval = 100 * time.Millisecond
)

// CheckMountEmpty checks if folder is not already a mountpoint.
// On Linux we use the OS-specific /proc/mount API so the check won't access the path.
// On Linux we use the OS-specific /proc/self/mountinfo API so the check won't access the path.
// Directories marked as "mounted" by autofs are considered not mounted.
func CheckMountEmpty(mountpoint string) error {
const msg = "directory already mounted, use --allow-non-empty to mount anyway: %s"

@@ -29,43 +27,48 @@ func CheckMountEmpty(mountpoint string) error {
return fmt.Errorf("cannot get absolute path: %s: %w", mountpoint, err)
}

entries, err := mtab.Entries(mtabPath)
infos, err := mountinfo.GetMounts(mountinfo.SingleEntryFilter(mountpointAbs))
if err != nil {
return fmt.Errorf("cannot read %s: %w", mtabPath, err)
return fmt.Errorf("cannot get mounts: %w", err)
}

foundAutofs := false
for _, entry := range entries {
if entry.Dir == mountpointAbs {
if entry.Type != "autofs" {
return fmt.Errorf(msg, mountpointAbs)
}
foundAutofs = true
for _, info := range infos {
if info.FSType != "autofs" {
return fmt.Errorf(msg, mountpointAbs)
}
foundAutofs = true
}
// It isn't safe to list an autofs in the middle of mounting
if foundAutofs {
return nil
}

return checkMountEmpty(mountpoint)
}

// CheckMountReady checks whether mountpoint is mounted by rclone.
// Only mounts with type "rclone" or "fuse.rclone" count.
func CheckMountReady(mountpoint string) error {
const msg = "mount not ready: %s"

mountpointAbs, err := filepath.Abs(mountpoint)
if err != nil {
return fmt.Errorf("cannot get absolute path: %s: %w", mountpoint, err)
}
entries, err := mtab.Entries(mtabPath)

infos, err := mountinfo.GetMounts(mountinfo.SingleEntryFilter(mountpointAbs))
if err != nil {
return fmt.Errorf("cannot read %s: %w", mtabPath, err)
return fmt.Errorf("cannot get mounts: %w", err)
}
for _, entry := range entries {
if entry.Dir == mountpointAbs && strings.Contains(entry.Type, "rclone") {

for _, info := range infos {
if strings.Contains(info.FSType, "rclone") {
return nil
}
}
return errors.New("mount not ready")

return fmt.Errorf(msg, mountpointAbs)
}

// WaitMountReady waits until mountpoint is mounted by rclone.
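
The rewrite above swaps the github.com/artyom/mtab parser for github.com/moby/sys/mountinfo. A minimal standalone sketch of the new API as used in the hunk; the mountpoint path is illustrative. SingleEntryFilter limits the /proc/self/mountinfo parse to the one path, so the mountpoint itself is never accessed:

package main

import (
	"fmt"
	"log"

	"github.com/moby/sys/mountinfo"
)

func main() {
	const mountpoint = "/mnt/example" // illustrative path
	infos, err := mountinfo.GetMounts(mountinfo.SingleEntryFilter(mountpoint))
	if err != nil {
		log.Fatalf("cannot get mounts: %v", err)
	}
	for _, info := range infos {
		// FSType is what CheckMountEmpty/CheckMountReady branch on
		// ("autofs", "fuse.rclone", ...).
		fmt.Printf("%s mounted as %s\n", info.Mountpoint, info.FSType)
	}
	if len(infos) == 0 {
		fmt.Println("not a mountpoint")
	}
}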

@@ -125,31 +125,31 @@ var Opt Options
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("mount", &Opt)
flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v")
flags.DurationVarP(flagSet, &Opt.AttrTimeout, "attr-timeout", "", Opt.AttrTimeout, "Time for which file/directory attributes are cached")
flags.StringArrayVarP(flagSet, &Opt.ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp (repeat if required)")
flags.StringArrayVarP(flagSet, &Opt.ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)")
flags.BoolVarP(flagSet, &Opt.DebugFUSE, "debug-fuse", "", Opt.DebugFUSE, "Debug the FUSE internals - needs -v", "Mount")
flags.DurationVarP(flagSet, &Opt.AttrTimeout, "attr-timeout", "", Opt.AttrTimeout, "Time for which file/directory attributes are cached", "Mount")
flags.StringArrayVarP(flagSet, &Opt.ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp (repeat if required)", "Mount")
flags.StringArrayVarP(flagSet, &Opt.ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)", "Mount")
// Non-Windows only
flags.BoolVarP(flagSet, &Opt.Daemon, "daemon", "", Opt.Daemon, "Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)")
flags.DurationVarP(flagSet, &Opt.DaemonTimeout, "daemon-timeout", "", Opt.DaemonTimeout, "Time limit for rclone to respond to kernel (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path")
flags.FVarP(flagSet, &Opt.CaseInsensitive, "mount-case-insensitive", "", "Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto)")
flags.BoolVarP(flagSet, &Opt.Daemon, "daemon", "", Opt.Daemon, "Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)", "Mount")
flags.DurationVarP(flagSet, &Opt.DaemonTimeout, "daemon-timeout", "", Opt.DaemonTimeout, "Time limit for rclone to respond to kernel (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.DefaultPermissions, "default-permissions", "", Opt.DefaultPermissions, "Makes kernel enforce access control based on the file mode (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.AllowNonEmpty, "allow-non-empty", "", Opt.AllowNonEmpty, "Allow mounting over a non-empty directory (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.AllowRoot, "allow-root", "", Opt.AllowRoot, "Allow access to root user (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.AllowOther, "allow-other", "", Opt.AllowOther, "Allow access to other users (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)", "Mount")
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)", "Mount")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)", "Mount")
flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path", "Mount")
flags.FVarP(flagSet, &Opt.CaseInsensitive, "mount-case-insensitive", "", "Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto)", "Mount")
// Windows and OSX
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)", "Mount")
// OSX only
flags.BoolVarP(flagSet, &Opt.NoAppleDouble, "noappledouble", "", Opt.NoAppleDouble, "Ignore Apple Double (._) and .DS_Store files (supported on OSX only)")
flags.BoolVarP(flagSet, &Opt.NoAppleXattr, "noapplexattr", "", Opt.NoAppleXattr, "Ignore all \"com.apple.*\" extended attributes (supported on OSX only)")
flags.BoolVarP(flagSet, &Opt.NoAppleDouble, "noappledouble", "", Opt.NoAppleDouble, "Ignore Apple Double (._) and .DS_Store files (supported on OSX only)", "Mount")
flags.BoolVarP(flagSet, &Opt.NoAppleXattr, "noapplexattr", "", Opt.NoAppleXattr, "Ignore all \"com.apple.*\" extended attributes (supported on OSX only)", "Mount")
// Windows only
flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive (supported on Windows only)")
flags.BoolVarP(flagSet, &Opt.NetworkMode, "network-mode", "", Opt.NetworkMode, "Mount as remote network drive, instead of fixed disk drive (supported on Windows only)", "Mount")
// Unix only
flags.DurationVarP(flagSet, &Opt.DaemonWait, "daemon-wait", "", Opt.DaemonWait, "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows)")
flags.DurationVarP(flagSet, &Opt.DaemonWait, "daemon-wait", "", Opt.DaemonWait, "Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows)", "Mount")
}

// NewMountCommand makes a mount command with the given name and Mount function

@@ -161,6 +161,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
Long: strings.ReplaceAll(strings.ReplaceAll(mountHelp, "|", "`"), "@", commandName) + vfs.Help,
Annotations: map[string]string{
"versionIntroduced": "v1.33",
"groups": "Filter",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

@@ -21,8 +21,8 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move", "")
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move", "")
}

var commandDefinition = &cobra.Command{

@@ -62,6 +62,7 @@ can speed transfers up greatly.
`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.19",
"groups": "Filter,Listing,Important,Copy",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

@@ -51,6 +51,7 @@ successful transfer.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.35",
"groups": "Filter,Listing,Important,Copy",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)

@@ -77,6 +77,7 @@ the remote you can also use the [size](/commands/rclone_size/) command.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.37",
"groups": "Filter,Listing",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)

@@ -26,6 +26,9 @@ delete files. To delete empty directories only, use command
**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
`,
Annotations: map[string]string{
"groups": "Important",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)

cmd/rc/rc.go

@@ -36,14 +36,14 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set, don't output the JSON result")
flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control")
flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args")
flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control")
flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control")
flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP")
flags.StringArrayVarP(cmdFlags, &options, "opt", "o", options, "Option in the form name=value or name placed in the \"opt\" array")
flags.StringArrayVarP(cmdFlags, &arguments, "arg", "a", arguments, "Argument placed in the \"arg\" array")
flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set, don't output the JSON result", "")
flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control", "")
flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args", "")
flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control", "")
flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control", "")
flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP", "")
flags.StringArrayVarP(cmdFlags, &options, "opt", "o", options, "Option in the form name=value or name placed in the \"opt\" array", "")
flags.StringArrayVarP(cmdFlags, &arguments, "arg", "a", arguments, "Argument placed in the \"arg\" array", "")
}

var commandDefinition = &cobra.Command{
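
For orientation, the rc flags registered above correspond to invocations along these lines (server URL, credentials and endpoint names are illustrative):

    rclone rc --url http://localhost:5572/ --user admin --pass secret core/stats
    rclone rc --loopback options/get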

@@ -20,7 +20,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.Int64VarP(cmdFlags, &size, "size", "", size, "File size hint to preallocate")
flags.Int64VarP(cmdFlags, &size, "size", "", size, "File size hint to preallocate", "")
}

var commandDefinition = &cobra.Command{

@@ -59,6 +59,7 @@ off caching it locally and then ` + "`rclone move`" + ` it to the
destination which can use retries.`,
Annotations: map[string]string{
"versionIntroduced": "v1.38",
"groups": "Important",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)

@@ -35,6 +35,7 @@ See the [rc documentation](/rc/) for more info on the rc flags.
` + libhttp.Help(rcflags.FlagPrefix) + libhttp.TemplateHelp(rcflags.FlagPrefix) + libhttp.AuthHelp(rcflags.FlagPrefix),
Annotations: map[string]string{
"versionIntroduced": "v1.45",
"groups": "RC",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)

@@ -24,6 +24,9 @@ with option ` + "`--rmdirs`" + `) to do that.

To delete a path and any objects in it, use [purge](/commands/rclone_purge/) command.
`,
Annotations: map[string]string{
"groups": "Important",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst := cmd.NewFsDir(args)

@@ -40,6 +40,7 @@ command.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.35",
"groups": "Important",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)

Some files were not shown because too many files have changed in this diff.