mirror of https://github.com/rclone/rclone.git synced 2026-01-03 00:53:43 +00:00

Compare commits


92 Commits

Author SHA1 Message Date
Cnly
add9976ea9 fs/config: make --dump imply -vv 2021-06-22 00:33:46 +08:00
Ivan Andreev
80bccacd83 fs: split overgrown fs.go (#5405)
Nothing is added or removed and no package is renamed by this change.
Just rearrange definitions between source files in the fs directory.

New source files:
- types.go      Filesystem types and interfaces
- features.go   Features and optional interfaces
- registry.go   Filesystem registry and backend options
- newfs.go      NewFs and its helpers
- configmap.go  Getters and Setters for ConfigMap
- pacer.go      Pacer with logging and calculator
The final fs.go contains what is left.

Also rename options.go to open_options.go
to dissociate from registry options.
2021-06-14 14:42:49 +03:00
Nick Craig-Wood
3349b055f5 fichier: fix move of files in the same directory
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/24
2021-06-11 14:21:23 +01:00
Nick Craig-Wood
bef0c23e00 fichier: make error messages report text from the API
See: https://forum.rclone.org/t/1fichier-rclone-does-not-allow-to-rename-files-and-folders-when-you-mount-a-1fichier-disk-drive/24726/24
2021-06-11 14:21:23 +01:00
Nick Craig-Wood
84201ed891 zoho: improve wording for region - fixes #5377 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
04608428bf Add Florian Penzkofer to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
6aaa06d7be Add darrenrhs to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
e53bad5353 Add Reid Buzby to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
f5397246eb Add Chris Lu to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
b8b73f2656 Add database64128 to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
96b67ce0ec Add Tyson Moore to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
e2beeffd76 Add Tom to contributors 2021-06-11 14:21:23 +01:00
Nick Craig-Wood
30b949642d Add acsfer to contributors 2021-06-11 14:21:23 +01:00
Florian Penzkofer
92b3518c78 fichier: support downloading password protected files and folders 2021-06-10 19:00:26 +02:00
Ivan Andreev
062919e08c deprecate cache backend (#5382) 2021-06-10 19:52:55 +03:00
darrenrhs
654f5309b0 docs: drive: include requirement to publish app in step-by-step - fixes #5393 2021-06-10 17:00:52 +01:00
albertony
318fa4472b docs: fix incorrect syntax in config update example 2021-06-10 08:59:18 +02:00
Reid Buzby
5104e24153 docs: fix incorrect token type for yandex
https://forum.rclone.org/t/yandex-documentation/24445/2
2021-06-09 13:04:55 +02:00
albertony
9d87a5192d docs: fix code section formatting in filtering docs
Fixes #5387
2021-06-08 18:53:18 +02:00
Ivan Andreev
29f967dba3 make commanddocs for v1.56 (#5383) 2021-06-08 18:57:04 +03:00
Chris Lu
1f846c18d4 s3: Add SeaweedFS 2021-06-08 09:59:57 +01:00
albertony
41f561bf26 jottacloud: fix invalid url in output from link command
Fixes #5370
2021-05-31 10:40:21 +02:00
database64128
df60e6323c 🧹 GCS: Clean up time format constants 2021-05-28 14:44:50 +01:00
database64128
58006a925a 📑 GCS: Update docs on mtime
- Mention the new modification time behavior and the modify window issue.
- Unify markdown format.
- ref rclone/rclone#5331
2021-05-28 14:44:50 +01:00
database64128
ee2fac1855 🕰️ GCS: Compatible with gsutil's mtime metadata
- Write `goog-reserved-file-mtime` in addition to `mtime`.
- Fallback to `goog-reserved-file-mtime` if `mtime` doesn't exist.
- ref rclone/rclone#5331
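For reference, a condensed sketch of the dual-key write described above, matching the GCS diff further down this page (uses time and strconv; not the backend code itself):

    // setMtimeMetadata writes both mtime keys so gsutil and rclone agree.
    func setMtimeMetadata(metadata map[string]string, modTime time.Time) {
        metadata["mtime"] = modTime.Format(time.RFC3339Nano)                          // rclone's key
        metadata["goog-reserved-file-mtime"] = strconv.FormatInt(modTime.Unix(), 10)  // gsutil's key
    }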
2021-05-28 14:44:50 +01:00
Tyson Moore
2188fe38e5 docs: add caveat about DSCP on Windows 2021-05-28 13:43:38 +01:00
Tyson Moore
b5f8f0973b fshttp: implement graceful DSCP error handling 2021-05-28 13:43:38 +01:00
Tyson Moore
85b8ba9469 fshttp: rework address parsing for DSCP (fixes #5293) 2021-05-28 13:43:38 +01:00
Tom
04a1f673f0 serve sftp: add --stdio flag to serve via stdio - fixes #5311 2021-05-28 13:40:32 +01:00
albertony
0574ebf44a vfs: do not print notice about missing poll-interval support when set to 0
Fixes #5359
2021-05-28 13:09:15 +02:00
albertony
22e86ce335 vfs: fix that umask option cannot be set as environment variable (#5351)
Fixes #5350
2021-05-22 20:48:02 +02:00
acsfer
c9fce20249 tardigrade: add warning about too many open files - Fixes #5310 2021-05-21 20:04:57 +01:00
Ivan Andreev
5b6f637461 fs/hash: align hashsum names and update documentation (#5339)
- Unify all hash names as lowercase alphanumerics without punctuation (see the registration example below).
- Legacy names continue to work but disappear from the docs; they can be deprecated or dropped later.
- Make rclone hashsum print the list of supported hashes when given a misspelled name.
- Update documentation.

Fixes #5071
Fixes #4841
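The new registration shape, taken from the dropbox diff later on this page, puts the canonical lowercase name first and the legacy display name second:

    // canonical lowercase name, then the legacy mixed-case alias
    DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)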
2021-05-21 17:32:33 +03:00
albertony
07f2f3a62e docs: fix link to paths on windows section 2021-05-19 22:11:17 +02:00
albertony
6dc190ec93 docs: mention that network/unc paths are supported in local filesystem on windows 2021-05-19 22:11:17 +02:00
Nick Craig-Wood
71f75a1d95 operations: fix tests work on compress by supplying incompressible data 2021-05-18 17:38:32 +01:00
Nick Craig-Wood
1b44035e45 filefabric: fix listing after the "from" field changed from a quoted "int" to a plain int. 2021-05-18 17:11:16 +01:00
Nick Craig-Wood
054b467f32 check: log the hash in use like cryptcheck does
See: https://forum.rclone.org/t/does-a-rclone-check-on-similar-remotes-still-compute-hashes/24288/15
2021-05-18 16:21:19 +01:00
Ivan Andreev
23da913d03 dbhashsum: drop command deprecated a year ago - #4837 (#5336)
dbhashsum was deprecated in rclone 1.52 on 2020-05-27
this patch drops the command completely since rclone 1.56
2021-05-18 12:27:17 +03:00
Nick Craig-Wood
c0cda087a8 s3: don't check to see if remote is object if it ends with /
Before this change, rclone would always check the root to see if it
was an object.

This change doesn't check to see if the root is an object if the path
ends with a /

This avoids a transaction where rclone HEADs the path to see if it
exists.

See #4990
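A minimal sketch of the check (hypothetical helper name, using the strings package; the real change lives in the s3 backend):

    // isLikelyObject reports whether the root could name an object.
    // A root ending in "/" can only be a directory, so the HEAD
    // request probing for an object there can be skipped.
    func isLikelyObject(root string) bool {
        return root != "" && !strings.HasSuffix(root, "/")
    }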
2021-05-17 16:43:34 +01:00
Nick Craig-Wood
1773717a47 fs/march: improve errors when root source/destination doesn't exist
See: https://forum.rclone.org/t/rclone-attempts-to-read-files-in-the-destination-directory-when-the-source-doesnt-exist/23412
2021-05-17 16:38:03 +01:00
Nick Craig-Wood
04308dcaa1 local: add --local-unicode-normalization (and remove --local-no-unicode-normalization)
macOS stores files in NFD form and transferring them like this to some
systems causes the Korean language to display incorrectly.

This adds the flag --local-unicode-normalization to optionally
normalize the file names to NFC.

This also removes the (long deprecated) --local-no-unicode-normalization flag

See: https://forum.rclone.org/t/support-for-korean-jaso-conversion/19435
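A minimal sketch of the normalization itself, using golang.org/x/text/unicode/norm (an illustration, not the backend code):

    import "golang.org/x/text/unicode/norm"

    // toNFC converts an NFD name (as stored by macOS) to NFC, which is
    // what --local-unicode-normalization does to local file names.
    func toNFC(name string) string {
        return norm.NFC.String(name)
    }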
2021-05-17 16:34:25 +01:00
Nick Craig-Wood
06f27384dd b2: fix versions and .files with no extension - fixes #5244 2021-05-17 16:20:29 +01:00
Nick Craig-Wood
82f1f7d2c4 config: expand docs on config protocol #3455 2021-05-17 12:10:58 +01:00
Nick Craig-Wood
6555d3eb33 onedrive: fix failed to configure: empty token found error #3455
This bug was caused as part of the config rework
2021-05-17 12:10:58 +01:00
Nick Craig-Wood
03229cf394 bin/config.py: add --rc flag for testing to an rclone rcd #3455 2021-05-17 12:10:58 +01:00
Nick Craig-Wood
f572bf7829 Add sp31415t1 to contributors 2021-05-17 12:10:58 +01:00
sp31415t1
f593558dc2 docs: improve --disable help 2021-05-14 15:44:58 +01:00
Ivan Andreev
08040a57b0 dropbox: improve "own App IP" instructions (#5325)
Instructions in https://rclone.org/dropbox/#get-your-own-dropbox-app-id
are a little incomplete. I had to guess a few extra details to make things work.
This patch adds missing parts.

Fixes #5242
2021-05-14 17:42:30 +03:00
Alexey Ivanov
2fa7a3c0fb dropbox: simplify chunked uploads
Signed-off-by: Alexey Ivanov <rbtz@dropbox.com>
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
798d1293df Add Alexey Ivanov to contributors 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
75c417ad93 dropbox: fix async batch missing the last few entries 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
5ee646f264 dropbox: make batcher retry all errors so it doesn't exit early
See: https://forum.rclone.org/t/dropbox-too-many-requests-or-write-operations-trying-again-in-15-seconds/23316/18
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
4a4aca4da7 dropbox: fix deadlock in batch Commit 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
2e4b65f888 dropbox: add --dropbox-batch-mode flag to speed up uploading #5156
This adds 3 upload modes for dropbox: off, sync and async, and makes
sync the default.

This should improve uploads (especially for small files) greatly.
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
77cda6773c config: tidy code to use UpdateRemote/CreateRemote instead of editOptions #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
dbc5167281 bin: add config.py as an example of how to use the state based config #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
635d1e10ae config create: add --state and --result parameters #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
296ceadda6 fs: add --all to rclone config create/update to ask all the config questions #3455
This also factors the config questions into a state based mechanism so
a backend can be configured using the same dialog as rclone config but
remotely.
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
7ae2891252 fs: Add Exclusive parameter to Option to choose Examples only #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
99caf79ffe config: allow config create and friends to take key=value parameters #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
095cf9e4be config create: add --non-interactive and --continue parameters #3455
This adds a mechanism to add external interfaces to rclone using the
state based configuration.
2021-05-14 14:07:44 +01:00
buengese
e57553930f jottacloud: fix legacy auth with state based config system
...also some minor cleanup
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
f122808d86 fs: add names to each config parameter so we can override them #3455 2021-05-14 14:07:44 +01:00
Nick Craig-Wood
94dbfa4ea6 fs: change Config callback into state based callback #3455
This is a very large change which turns the post Config function in
backends into a state based call and response system so that
alternative user interfaces can be added.

The existing config logic has been converted, but it is quite
complicated and follow-up commits will likely be needed to fix it!

Follow up commits will add a command line and API based way of using
this configuration system.
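The drive, google photos and jottacloud diffs below show real conversions; a stripped-down sketch of the call-and-response shape (the "enable" states here are hypothetical):

    Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
        switch config.State {
        case "":
            // Ask a question; the answer arrives in a later call with
            // State == "enable_done" and the answer in config.Result.
            return fs.ConfigConfirm("enable_done", false, "config_enable", "Enable the feature?\n")
        case "enable_done":
            if config.Result == "true" {
                m.Set("feature", "on")
            }
            return nil, nil // nil, nil means configuration is complete
        }
        return nil, fmt.Errorf("unknown state %q", config.State)
    },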
2021-05-14 14:07:44 +01:00
Nick Craig-Wood
6f2e525821 Add Antoon Prins to contributors 2021-05-14 14:07:44 +01:00
Ivan Andreev
119bddc10b selfupdate: fix archive name on macos 2021-05-13 22:35:39 +03:00
albertony
28e9fd45cc vfs: avoid unnecessary subdir in cache path
Fixes #5316
2021-05-13 11:16:42 +02:00
Antoon Prins
326f3b35ff webdav: add headers option 2021-05-12 09:52:07 +01:00
albertony
ce83228cb2 sftp: expand tilde and environment variables in configured known_hosts_file (#5322)
Fixes #5220
2021-05-11 19:58:26 +02:00
Chris Macklin
732bc08ced config: replace defaultConfig with a thread-safe in-memory implementation 2021-05-07 16:04:09 +01:00
Nick Craig-Wood
6ef7178ee4 local: always use readlink to read symlink size
It was discovered that on some Android systems, the stat size of a
symlink differs from the size that readlink returns.

This was giving errors like this

    transport connection broken: http: ContentLength=30 with Body length 28

There are enough cases where the readlink size differs from the stat
size that this patch now always does readlink to work out the size of
a symlink.

Since symlinks are relatively uncommon this shouldn't affect
performance too much and will mean that the size is always correct.

This deprecates the --local-zero-size-links flag which is now
effectively always enabled.

See: https://forum.rclone.org/t/problem-with-symlinks-and-links/23840/
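A minimal sketch of the size calculation (standard library only; not the backend code itself):

    // symlinkSize returns the size rclone now uses for a symlink: the
    // length of the link target from readlink, which matches what a
    // reader of the link (with --links) will actually see.
    func symlinkSize(path string) (int64, error) {
        target, err := os.Readlink(path)
        if err != nil {
            return 0, err
        }
        return int64(len(target)), nil
    }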
2021-05-04 08:53:09 +01:00
Nick Craig-Wood
9ff6f48d74 Remove accidentally committed *.orig and *.rej files and ignore 2021-05-03 07:58:29 +01:00
Nick Craig-Wood
532af77fd1 Add Chris Macklin to contributors 2021-05-03 07:58:29 +01:00
Nolan Woods
ab7dfe0c87 http: clean up Bind to better use middleware 2021-05-02 11:31:01 +01:00
Nolan Woods
e489a101f6 lib/http: add default 404 handler 2021-05-02 11:30:02 +01:00
Chris Macklin
35a86193b7 accounting: deglobalize startTime/elapsedTime - fixes #5282 2021-05-01 14:51:21 +01:00
x0b
2833941da8 build: add gomobile android build 2021-04-30 20:39:04 +01:00
Nick Craig-Wood
9e6c23d9af fs: add --disable-http2 for global http2 disable #5253 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
8bef972262 Add Gautam Kumar to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
0a968818f6 Add Nolan Woods to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
c2ac353183 Add lewisxy to contributors 2021-04-30 20:31:04 +01:00
Nick Craig-Wood
773da395fb Add Tatsuya Noyori to contributors 2021-04-30 20:31:04 +01:00
Gautam Kumar
9e8cd6bff9 docs: fixed some typos 2021-04-28 22:55:27 +01:00
Nolan Woods
5d2e327b6f http: Replace httplib with lib/http 2021-04-28 22:54:15 +01:00
Nolan Woods
77221d7528 httplib: Deprecate package 2021-04-28 22:54:15 +01:00
Nolan Woods
1971c1ef87 httplib: Move httplib/serve/data to ../serve/http/data 2021-04-28 22:54:15 +01:00
Nolan Woods
7e7dbe16c2 httplib: Add --template config and flags to serve/data 2021-04-28 22:54:15 +01:00
Nolan Woods
002d323c94 lib/http: Move HTTP object serialization logic to lib/http 2021-04-28 22:54:15 +01:00
Nolan Woods
4ad62ec016 lib/http: Add authentication middleware with basic auth implementation 2021-04-28 22:54:15 +01:00
Nolan Woods
95ee14bb2c feat: Add lib/http
lib/http provides an abstraction for a central http server that services can bind routes to
2021-04-28 22:54:15 +01:00
Romeo Kienzler
88aabd1f71 docs: corrected spelling
from "Check the integrity of an encrypted remote." to "Check the integrity of a crypted remote."
2021-04-28 22:50:55 +01:00
144 changed files with 7194 additions and 5507 deletions

View File

@@ -267,6 +267,15 @@ jobs:
        run: |
          make
      - name: install gomobile
        run: |
          go get golang.org/x/mobile/cmd/gobind
          go get golang.org/x/mobile/cmd/gomobile
          env PATH=$PATH:~/go/bin gomobile init
      - name: arm-v7a gomobile build
        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
      - name: arm-v7a Set environment variables
        shell: bash
        run: |

2
.gitignore vendored
View File

@@ -11,3 +11,5 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej

View File

@@ -62,6 +62,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -87,7 +88,6 @@ Please see [the full list of all storage providers and their features](https://r
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna

View File

@@ -69,12 +69,10 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: acdConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "checkpoint",

View File

@@ -83,7 +83,7 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -92,16 +92,15 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
return errors.Wrap(err, "failed to configure token with jwt authentication")
return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token with oauth authentication")
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
}
return nil
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",

View File

@@ -339,8 +339,14 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
var warnDeprecated sync.Once
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
warnDeprecated.Do(func() {
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
})
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)

View File

@@ -182,32 +182,64 @@ func init() {
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return errors.Wrap(err, "couldn't parse config into struct")
return nil, errors.Wrap(err, "couldn't parse config into struct")
}
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
switch config.State {
case "":
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
}
return fs.ConfigGoto("teamdrive")
case "teamdrive":
if opt.TeamDriveID == "" {
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
}
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
case "teamdrive_ok":
if config.Result == "false" {
m.Set("team_drive", "")
return nil, nil
}
f, err := newFs(ctx, name, "", m)
if err != nil {
return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
}
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return nil, err
}
if len(teamDrives) == 0 {
return fs.ConfigError("", "No Shared Drives found in your account")
}
return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
teamDrive := teamDrives[i]
return teamDrive.Id, teamDrive.Name
})
case "teamdrive_final":
driveID := config.Result
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil, nil
}
err = configTeamDrive(ctx, opt, m, name)
if err != nil {
return errors.Wrap(err, "failed to configure Shared Drive")
}
return nil
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
@@ -948,48 +980,6 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
return
}
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
ci := fs.GetConfig(ctx)
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a Shared Drive (Team Drive)?\n")
} else {
fmt.Printf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
return nil
}
f, err := newFs(ctx, name, "", m)
if err != nil {
return errors.Wrap(err, "failed to make Fs to list Shared Drives")
}
fmt.Printf("Fetching Shared Drive list...\n")
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return err
}
if len(teamDrives) == 0 {
fmt.Printf("No Shared Drives found in your account")
return nil
}
var driveIDs, driveNames []string
for _, teamDrive := range teamDrives {
driveIDs = append(driveIDs, teamDrive.Id)
driveNames = append(driveNames, teamDrive.Name)
}
driveID := config.Choose("Enter a Shared Drive ID", driveIDs, driveNames, true)
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil
}
// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
@@ -1168,7 +1158,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
}
}
f.rootFolderID = rootID
fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
}
f.dirCache = dircache.New(f.root, f.rootFolderID, f)

350
backend/dropbox/batcher.go Normal file
View File

@@ -0,0 +1,350 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.
package dropbox
import (
"context"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, errors.Wrap(err, "batch commit failed")
}
return batchStatus, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxTries = 120
for try := 1; try <= maxTries; try++ {
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries)
} else {
if batchStatus.Tag == "complete" {
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > time.Second {
sleepTime = time.Second
}
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Set signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}

View File

@@ -138,23 +138,19 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
opt := oauthutil.Options{
NoOffline: true,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: getOauthConfig(m),
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
@@ -213,6 +209,63 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may cause
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -236,6 +289,10 @@ type Options struct {
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -255,6 +312,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -270,8 +328,6 @@ type Object struct {
hash string // content_hash of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -382,6 +438,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
cfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
@@ -1381,6 +1441,13 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1540,97 +1607,83 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// uploadChunked uploads the object in parts
//
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
// Will introduce two additional network requests to start and finish the session.
// If the size is unknown (i.e. -1) the method incurs one additional
// request to the Dropbox API that does not carry a payload to close the append session.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
}
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
fmtChunk := func(cur int, last bool) {
if chunks == 0 && last {
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
} else if chunks == 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
}
}
// write the first chunk
fmtChunk(1, false)
// start upload
var res *files.UploadSessionStartResult
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
chunkSize := int64(o.fs.opt.ChunkSize)
chunks, remainder := size/chunkSize, size%chunkSize
if remainder > 0 {
chunks++
}
// write chunks
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
appendArg := files.UploadSessionAppendArg{
Cursor: &cursor,
Close: false,
}
// write more whole chunks (if any)
currentChunk := 2
for {
if chunks > 0 && currentChunk >= chunks {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
break
}
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
for currentChunk := 1; ; currentChunk++ {
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
if chunks < 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after the first chunk is uploaded, we retry everything
// after session is started, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
currentChunk++
if appendArg.Close {
break
}
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
}
}
// write the remains
// finish upload
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
fmtChunk(currentChunk, true)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, args)
}
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -1697,7 +1750,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1708,6 +1761,15 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "upload failed")
}
// If we haven't received data back from batch upload then fake it
//
// This will only happen if we are uploading async batches
if entry == nil {
o.bytes = size
o.modTime = commitInfo.ClientModified
o.hash = "" // we don't have this
return nil
}
return o.setMetadataFromEntry(entry)
}
@@ -1735,6 +1797,7 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -4,6 +4,7 @@ import (
"context"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
@@ -90,6 +91,7 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
request := DownloadRequest{
URL: url,
Single: 1,
Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -118,10 +120,16 @@ func fileFromSharedFile(file *SharedFile) File {
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
ContentType: "application/x-www-form-urlencoded",
}
if f.opt.FolderPassword != "" {
opts.Method = "POST"
opts.Parameters = nil
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
}
var sharedFiles SharedFolderResponse
@@ -311,7 +319,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.New("Can't remove non-empty dir")
return nil, errors.Errorf("can't remove folder: %s", response.Message)
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -396,6 +404,34 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
request := &RenameFileRequest{
URLs: []RenameFileURL{
{
URL: url,
Filename: newName,
},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rename.cgi",
}
response = &RenameFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
}
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")

View File

@@ -44,6 +44,18 @@ func init() {
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -75,9 +87,11 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
@@ -423,25 +437,45 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// Find current directory ID
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
// If it is in the correct directory, just rename it
var url string
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
}
file, err := f.readFileInfo(ctx, resp.URLs[0])
file, err := f.readFileInfo(ctx, url)
if err != nil {
return nil, errors.New("couldn't read file data")
}
@@ -472,7 +506,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)

View File

@@ -19,6 +19,7 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -63,8 +64,9 @@ type MoveFileRequest struct {
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
URLs []string `json:"urls"`
Status string `json:"status"`
Message string `json:"message"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
@@ -76,9 +78,10 @@ type CopyFileRequest struct {
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
Status string `json:"status"`
Message string `json:"message"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the the CopyFileResponse
@@ -87,6 +90,30 @@ type FileCopy struct {
ToURL string `json:"to_url"`
}
// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
URL string `json:"url"`
Filename string `json:"filename"`
}
// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
URLs []RenameFileURL `json:"urls"`
Pretty int `json:"pretty"`
}
// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Renamed int `json:"renamed"`
URLs []struct {
URL string `json:"url"`
OldFilename string `json:"old_filename"`
NewFilename string `json:"new_filename"`
} `json:"urls"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`

View File

@@ -5,6 +5,7 @@ package api
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
@@ -51,6 +52,23 @@ func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int
// MarshalJSON turns an Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
return json.Marshal((*int)(i))
}
// UnmarshalJSON turns JSON into an Int
func (i *Int) UnmarshalJSON(data []byte) error {
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
data = data[1 : len(data)-1]
}
return json.Unmarshal(data, (*int)(i))
}
// Status is the status returned in all status responses
type Status struct {
Code string `json:"status"`
@@ -115,7 +133,7 @@ type GetFolderContentsResponse struct {
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From int `json:"from,string"`
From Int `json:"from"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`
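A quick illustration of why the Int type above fixes the listing: both the old quoted encoding and the new plain encoding decode to the same value (hypothetical snippet using encoding/json, not part of the backend):

    var page struct {
        From Int `json:"from"`
    }
    _ = json.Unmarshal([]byte(`{"from":"42"}`), &page) // quoted: page.From == 42
    _ = json.Unmarshal([]byte(`{"from":42}`), &page)   // plain:  page.From == 42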

View File

@@ -21,6 +21,7 @@ import (
"io/ioutil"
"net/http"
"path"
"strconv"
"strings"
"time"
@@ -50,10 +51,10 @@ import (
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
timeFormat = time.RFC3339Nano
metaMtime = "mtime" // key to store mtime in metadata
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
@@ -75,18 +76,16 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return nil
return nil, nil
}
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: storageConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
@@ -922,7 +921,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormatIn, mtimeString)
modTime, err := time.Parse(timeFormat, mtimeString)
if err == nil {
o.modTime = modTime
return
@@ -930,8 +929,19 @@ func (o *Object) setMetaData(info *storage.Object) {
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to GSUtil mtime
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
if ok {
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
if err == nil {
o.modTime = time.Unix(unixTimeSec, 0)
return
}
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormatIn, info.Updated)
modTime, err := time.Parse(timeFormat, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
@@ -988,7 +998,8 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormatOut)
metadata[metaMtime] = modTime.Format(timeFormat)
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
return metadata
}
@@ -1000,11 +1011,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
return err
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
object.Metadata[metaMtime] = modTime.Format(timeFormat)
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()

View File

@@ -77,36 +77,36 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return errors.Wrap(err, "couldn't parse config into struct")
return nil, errors.Wrap(err, "couldn't parse config into struct")
}
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
switch config.State {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning_done":
return nil, nil
}
// Do the oauth
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
// Warn the user
fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.
`)
return nil
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",

View File

@@ -55,12 +55,10 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})

View File

@@ -48,37 +48,29 @@ const (
rootURL = "https://jfs.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configClientID = "client_id"
configClientSecret = "client_secret"
configUsername = "username"
configVersion = 1
v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
v1registerURL = "https://api.jottacloud.com/auth/v1/register"
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
v1configVersion = 0
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
defaultClientID = "jottacli"
legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
// needs to be done early so we can use oauth during config
@@ -86,44 +78,7 @@ func init() {
Name: "jottacloud",
Description: "Jottacloud",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return errors.Wrap(err, "failed to parse config version - corrupted config")
}
refresh = (ver != configVersion) && (ver != v1configVersion)
}
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return nil
}
}
}
fmt.Printf("Choose authentication type:\n" +
"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
switch config.ChooseNumber("Your choice", 1, 3) {
case 1:
return v2config(ctx, name, m)
case 2:
return v1config(ctx, name, m)
case 3:
return teliaCloudConfig(ctx, name, m)
default:
return errors.New("unknown config choice")
}
},
Config: Config,
Options: []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
@@ -158,6 +113,183 @@ func init() {
})
}
// Config runs the backend configuration protocol
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
switch config.State {
case "":
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
Value: "standard",
Help: "Standard authentication - use this if you're a normal Jottacloud user.",
}, {
Value: "legacy",
Help: "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia",
Help: "Telia Cloud authentication - use this if you are using Telia Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(config.Result)
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
m.Set("configVersion", fmt.Sprint(configVersion))
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
case "standard_token":
loginToken := config.Result
m.Set(configClientID, defaultClientID)
m.Set(configClientSecret, "")
srv := rest.NewClient(fshttp.NewClient(ctx))
token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
if err != nil {
return nil, errors.Wrap(err, "failed to get oauth token")
}
m.Set(configTokenURL, tokenEndpoint)
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return nil, errors.Wrap(err, "error while saving token")
}
return fs.ConfigGoto("choose_device")
case "legacy": // configure a jottacloud backend using legacy authentication
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
Rclone has its own Jottacloud API key which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine, it's recommended to create a
machine specific API key. These keys can NOT be shared between
machines.`)
case "legacy_api":
srv := rest.NewClient(fshttp.NewClient(ctx))
if config.Result == "true" {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
return nil, errors.Wrap(err, "failed to register device")
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
case "legacy_username":
m.Set(configUsername, config.Result)
return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
case "legacy_password":
m.Set("password", config.Result)
m.Set("auth_code", "")
return fs.ConfigGoto("legacy_do_auth")
case "legacy_auth_code":
authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
m.Set("auth_code", authCode)
return fs.ConfigGoto("legacy_do_auth")
case "legacy_do_auth":
username, _ := m.Get(configUsername)
password, _ := m.Get("password")
password = obscure.MustReveal(password)
authCode, _ := m.Get("auth_code")
srv := rest.NewClient(fshttp.NewClient(ctx))
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
}
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: legacyTokenURL,
},
ClientID: clientID,
ClientSecret: obscure.MustReveal(clientSecret),
}
token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
if err == errAuthCodeRequired {
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
}
m.Set("password", "")
m.Set("auth_code", "")
if err != nil {
return nil, errors.Wrap(err, "failed to get oauth token")
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return nil, errors.Wrap(err, "error while saving token")
}
return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
case "choose_device_query":
if config.Result != "true" {
m.Set(configDevice, "")
m.Set(configMountpoint, "")
return fs.ConfigGoto("end")
}
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
m.Set(configUsername, cust.Username)
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return nil, err
}
return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
return acc.Devices[i].Name, ""
})
case "choose_device_result":
device := config.Result
m.Set(configDevice, device)
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
username, _ := m.Get(configUsername)
dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
if err != nil {
return nil, err
}
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
return dev.MountPoints[i].Name, ""
})
case "choose_device_mountpoint":
mountpoint := config.Result
m.Set(configMountpoint, mountpoint)
return fs.ConfigGoto("end")
case "end":
// All the config flows end up here in case we need to carry on with something
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
}
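Taken together, the cases above define a simple question/answer protocol: each fs.Config* helper returns a prompt plus the name of the state that will receive the user's answer in config.Result, and returning nil, nil ends the flow. A minimal sketch of that protocol (the state and key names here are invented for illustration):
func ExampleConfig(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	switch config.State {
	case "":
		// ask a question; the answer arrives in state "got_name"
		return fs.ConfigInput("got_name", "config_name", "Name of the thing")
	case "got_name":
		m.Set("thing_name", config.Result)
		return fs.ConfigConfirm("confirmed", true, "config_ok", "Is that OK?")
	case "confirmed":
		if config.Result != "true" {
			return nil, fmt.Errorf("cancelled by user")
		}
		return nil, nil // all done
	}
	return nil, fmt.Errorf("unknown state %q", config.State)
}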
// Options defines the configuration for this backend
type Options struct {
Device string `config:"device"`
@@ -218,10 +350,21 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a box 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
// joinPath joins two path/url elements
//
// Does not perform clean on the result like path.Join does,
// which breaks urls by changing prefix "https://" into "https:/".
func joinPath(base string, rel string) string {
if rel == "" {
return base
}
if strings.HasSuffix(base, "/") {
return base + strings.TrimPrefix(rel, "/")
}
if strings.HasPrefix(rel, "/") {
return strings.TrimSuffix(base, "/") + rel
}
return base + "/" + rel
}
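A quick illustration of the difference, using the joinPath defined above (the URL is chosen for the sake of example):
	path.Join("https://www.jottacloud.com/", "/p/abc") // "https:/www.jottacloud.com/p/abc", Clean collapses the "//"
	joinPath("https://www.jottacloud.com/", "/p/abc")  // "https://www.jottacloud.com/p/abc"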
// retryErrorCodes is a slice of error codes that we will retry
@@ -243,111 +386,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) error {
teliaCloudOauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
if err != nil {
return errors.Wrap(err, "failed to load oAuthClient")
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
return errors.Wrap(err, "failed to setup mountpoint")
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
return nil
}
// v1config configure a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) error {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
return errors.Wrap(err, "failed to register device")
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.AuthURL = v1tokenURL
oauthConfig.Endpoint.TokenURL = v1tokenURL
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuthV1(ctx, srv, username, password)
if err != nil {
return errors.Wrap(err, "failed to get oauth token")
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return errors.Wrap(err, "error while saving token")
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to load oAuthClient")
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
return errors.Wrap(err, "failed to setup mountpoint")
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(v1configVersion))
return nil
}
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
@@ -366,7 +404,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
opts := rest.Opts{
Method: "POST",
RootURL: v1registerURL,
RootURL: legacyRegisterURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
@@ -377,8 +415,13 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}
// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
var errAuthCodeRequired = errors.New("auth code required")
// doLegacyAuth runs the actual token request for V1 authentication
//
// Call this first with a blank authCode. If errAuthCodeRequired is
// returned, call it again with the authCode from the user.
func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Config, username, password, authCode string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
@@ -392,22 +435,19 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}
if authCode != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
}
// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
if err != nil && authCode == "" {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
return token, errAuthCodeRequired
}
}
}
@@ -419,52 +459,11 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
return token, err
}
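The calling convention, sketched directly from the "legacy_do_auth" and "legacy_auth_code" states above:
	token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, "")
	if err == errAuthCodeRequired {
		// prompt the user for the SMS code, then retry with it
		token, err = doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
	}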
// v2config configure a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) error {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
return errors.Wrap(err, "failed to get oauth token")
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return errors.Wrap(err, "error while saving token")
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to load oAuthClient")
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
return errors.Wrap(err, "failed to setup mountpoint")
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
return nil
}
// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
// doTokenAuth runs the actual token request for V2 authentication
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
return token, "", err
}
// decode login token
@@ -472,7 +471,7 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
return token, "", err
}
// retrieve endpoint urls
@@ -481,19 +480,14 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
RootURL: loginToken.WellKnownLink,
}
var wellKnown api.WellKnown
_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, err
return token, "", err
}
// save the tokenurl
oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
m.Set(configTokenURL, wellKnown.TokenEndpoint)
// prepare our token request with username and password
values := url.Values{}
values.Set("client_id", "jottacli")
values.Set("client_id", defaultClientID)
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
@@ -501,68 +495,33 @@ func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m
values.Encode()
opts = rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
RootURL: wellKnown.TokenEndpoint,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
}
// do the first request
var jsonToken api.TokenJSON
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, err
return token, "", err
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, err
}
// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return "", "", err
}
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return "", "", err
}
var deviceNames []string
for i := range acc.Devices {
deviceNames = append(deviceNames, acc.Devices[i].Name)
}
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
device = config.Choose("Devices", deviceNames, nil, false)
dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
if err != nil {
return "", "", err
}
if len(dev.MountPoints) == 0 {
return "", "", errors.New("no mountpoints for selected device")
}
var mountpointNames []string
for i := range dev.MountPoints {
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
}
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
return device, mountpoint, err
return token, wellKnown.TokenEndpoint, err
}
// getCustomerInfo queries general information about the account
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.CustomerInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: "account/v1/customer",
}
_, err = srv.CallJSON(ctx, &opts, nil, &info)
_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get customer info")
}
@@ -679,7 +638,7 @@ func (f *Fs) filePath(file string) string {
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if v1tokenURL == req.URL.String() {
if legacyTokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
@@ -695,53 +654,50 @@ func grantTypeFilter(req *http.Request) {
}
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuthClient *http.Client, ts *oauthutil.TokenSource, err error) {
// Check config version
var ver int
version, ok := m.Get("configVersion")
if ok {
ver, err = strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
return nil, nil, errors.New("Failed to parse config version")
}
ok = (ver == configVersion) || (ver == v1configVersion)
ok = (ver == configVersion) || (ver == legacyConfigVersion)
}
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
return nil, nil, errors.New("Outdated config - please reconfigure this backend")
}
baseClient := fshttp.NewClient(ctx)
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
}
if ver == configVersion {
oauthConfig.ClientID = "jottacli"
oauthConfig.ClientID = defaultClientID
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == v1configVersion {
} else if ver == legacyConfigVersion {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
clientID = legacyClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
clientSecret = legacyEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.TokenURL = v1tokenURL
oauthConfig.Endpoint.AuthURL = v1tokenURL
oauthConfig.Endpoint.TokenURL = legacyTokenURL
oauthConfig.Endpoint.AuthURL = legacyTokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {
@@ -754,13 +710,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Create OAuth Client
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
return oAuthClient, ts, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
oAuthClient, ts, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
root = strings.Trim(root, "/")
f := &Fs{
name: name,
@@ -1298,8 +1270,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if result.PublicSharePath == "" {
return "", errors.New("couldn't create public link - no link path received")
}
link = path.Join(baseURL, result.PublicSharePath)
return link, nil
return joinPath(baseURL, result.PublicSharePath), nil
}
// About gets quota information

View File

@@ -27,6 +27,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/text/unicode/norm"
)
// Constants
@@ -73,25 +74,34 @@ points, as you explicitly acknowledge that they should be skipped.`,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead)
Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
- Windows
- On some virtual filesystems (such as LucidLink)
- Android
Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0 which in most cases fixes the problem.`,
So rclone now always reads the link
`,
Default: false,
Advanced: true,
}, {
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames
This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
This flag can be used to normalize file names read from the local
filesystem into unicode NFC form.
Rclone does not normally touch the encoding of file names it reads from
the file system.
This can be useful when using macOS as it normally provides decomposed (NFD)
unicode which in some languages (e.g. Korean) doesn't display properly on
some OSes.
Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
Default: false,
Advanced: true,
}, {
@@ -196,8 +206,7 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
UTFNorm bool `config:"unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
@@ -256,10 +265,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errLinksAndCopyLinks
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
f := &Fs{
name: name,
opt: *opt,
@@ -522,6 +527,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
if f.opt.UTFNorm {
filename = norm.NFC.String(filename)
}
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
if !utf8.ValidString(filename) {
@@ -1017,7 +1025,6 @@ func (file *localOpenFile) Close() (err error) {
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
// Read the link and return the destination it as the contents of the object
linkdst, err := os.Readlink(o.path)
fs.Infof(o, "openTranslatedLink link = %q, offset = %d, limit = %d, err = %v", linkdst, offset, limit, err)
if err != nil {
return nil, err
}
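The unicode_normalization option added above ultimately comes down to a single call in cleanRemote, norm.NFC.String. A small self-contained sketch of what it does to a decomposed name:
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	nfd := "cafe\u0301" // "café" with a decomposed é (e plus combining acute), as macOS often stores it
	nfc := norm.NFC.String(nfd)
	fmt.Println(len(nfd), len(nfc)) // 6 5: NFC recomposes the two code points into one é
	fmt.Println(nfd == nfc)         // false, even though both render as "café"
}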
@@ -1268,11 +1275,14 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// On Windows links read as 0 size so set the correct size here
// Optionally, users can turn this feature on with the zero_size_links flag
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
// Read the size of the link.
//
// The value in info.Size() is not always correct
// - Windows links read as 0 size
// - Some virtual filesystems (such as LucidLink) links read as 0 size
// - Android - some versions the links are larger than readlink suggests
if o.translatedLink {
linkdst, err := os.Readlink(o.path)
fs.Infof(o, "setMetadata link = %q, err = %v", linkdst, err)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)
} else {

View File

@@ -80,7 +80,7 @@ var oauthConfig = &oauth2.Config{
// Register with Fs
func init() {
MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)
MrHashType = hash.RegisterHash("mailru", "MailruHash", 40, mrhash.New)
fs.Register(&fs.RegInfo{
Name: "mailru",
Description: "Mail.ru Cloud",
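The extra argument here (and in the matching onedrive hunk below) suggests RegisterHash now takes a short lowercase name alongside the legacy display name. A sketch of registering a hash under that inferred shape; the actual declaration is not shown in this diff:
	// assumed shape: RegisterHash(name, alias string, width int, newFunc func() hash.Hash) hash.Type
	MyHashType = hash.RegisterHash("myhash", "MyHash", 40, myhash.New)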

View File

@@ -93,213 +93,12 @@ var (
// Register with Fs
func init() {
QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
QuickXorHashType = hash.RegisterHash("quickxor", "QuickXorHash", 40, quickxorhash.New)
fs.Register(&fs.RegInfo{
Name: "onedrive",
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
region, _ := m.Get("region")
graphURL := graphAPIEndpoint[region] + "/v1.0"
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
ci := fs.GetConfig(ctx)
err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to configure OneDrive")
}
srv := rest.NewClient(oAuthClient)
var opts rest.Opts
var finalDriveID string
var siteID string
var relativePath string
switch config.Choose("Your choice",
[]string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
[]string{
"OneDrive Personal or Business",
"Root Sharepoint site",
"Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
"Search for a Sharepoint site",
"Type in driveID (advanced)",
"Type in SiteID (advanced)",
"Sharepoint server-relative path (advanced, e.g. /teams/hr)",
},
false) {
case "onedrive":
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/me/drives",
}
case "sharepoint":
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root/drives",
}
case "driveid":
fmt.Printf("Paste your Drive ID here> ")
finalDriveID = config.ReadLine()
case "siteid":
fmt.Printf("Paste your Site ID here> ")
siteID = config.ReadLine()
case "url":
fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
fmt.Printf("Paste your Site URL here> ")
siteURL := config.ReadLine()
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
relativePath = "/sites/" + match[1]
} else {
relativePath = "/sites/" + siteURL
}
case "path":
fmt.Printf("Enter server-relative URL here> ")
relativePath = config.ReadLine()
case "search":
fmt.Printf("What to search for> ")
searchTerm := config.ReadLine()
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites?search=" + searchTerm,
}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
return errors.Wrap(err, "failed to query available sites")
}
if len(sites.Sites) == 0 {
return errors.Errorf("search for %q returned no results", searchTerm)
}
fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
for index, site := range sites.Sites {
fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
}
siteID = sites.Sites[config.ChooseNumber("Choose site to use:", 0, len(sites.Sites)-1)].SiteID
}
// if we use server-relative URL for finding the drive
if relativePath != "" {
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root:" + relativePath,
}
site := siteResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &site)
if err != nil {
return errors.Wrap(err, "failed to query available site by relative path")
}
siteID = site.SiteID
}
// if we have a siteID we need to ask for the drives
if siteID != "" {
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/" + siteID + "/drives",
}
}
// We don't have the final ID yet?
// query Microsoft Graph
if finalDriveID == "" {
drives := drivesResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &drives)
if err != nil {
return errors.Wrap(err, "failed to query available drives")
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts.Path == "/me/drives" {
opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
if err != nil {
return errors.Wrap(err, "failed to query available drives")
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
if len(drives.Drives) == 0 {
return errors.New("no drives found")
}
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
for index, drive := range drives.Drives {
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
}
finalDriveID = drives.Drives[config.ChooseNumber("Choose drive to use:", 0, len(drives.Drives)-1)].DriveID
}
// Test the driveID and get drive type
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
if err != nil {
return errors.Wrapf(err, "failed to query root for drive %s", finalDriveID)
}
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
// This does not work, YET :)
if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
return errors.New("cancelled by user")
}
m.Set(configDriveID, finalDriveID)
m.Set(configDriveType, rootItem.ParentReference.DriveType)
return nil
},
Config: Config,
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
Help: "Choose national cloud region for OneDrive.",
@@ -462,6 +261,266 @@ At the time of writing this only works with OneDrive personal paid accounts.
})
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
// Get the region and graphURL from the config
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
region, _ = m.Get("region")
graphURL = graphAPIEndpoint[region] + "/v1.0"
return region, graphURL
}
// Config for chooseDrive
type chooseDriveOpt struct {
opts rest.Opts
finalDriveID string
siteID string
relativePath string
}
// chooseDrive returns a query to choose which drive the user is interested in
func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest.Client, opt chooseDriveOpt) (*fs.ConfigOut, error) {
_, graphURL := getRegionURL(m)
// if we use server-relative URL for finding the drive
if opt.relativePath != "" {
opt.opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root:" + opt.relativePath,
}
site := siteResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
}
opt.siteID = site.SiteID
}
// if we have a siteID we need to ask for the drives
if opt.siteID != "" {
opt.opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/" + opt.siteID + "/drives",
}
}
drives := drivesResponse{}
// We don't have the final ID yet?
// query Microsoft Graph
if opt.finalDriveID == "" {
_, err := srv.CallJSON(ctx, &opt.opts, nil, &drives)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opt.opts.Path == "/me/drives" {
opt.opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
} else {
drives.Drives = append(drives.Drives, driveResource{
DriveID: opt.finalDriveID,
DriveName: "Chosen Drive ID",
DriveType: "drive",
})
}
if len(drives.Drives) == 0 {
return fs.ConfigError("choose_type", "No drives found")
}
return fs.ConfigChoose("driveid_final", "config_driveid", "Select drive you want to use", len(drives.Drives), func(i int) (string, string) {
drive := drives.Drives[i]
return drive.DriveID, fmt.Sprintf("%s (%s)", drive.DriveName, drive.DriveType)
})
}
// Config the backend
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
region, graphURL := getRegionURL(m)
if config.State == "" {
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure OneDrive")
}
srv := rest.NewClient(oAuthClient)
switch config.State {
case "choose_type":
return fs.ConfigChooseFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
Value: "onedrive",
Help: "OneDrive Personal or Business",
}, {
Value: "sharepoint",
Help: "Root Sharepoint site",
}, {
Value: "url",
Help: "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
}, {
Value: "search",
Help: "Search for a Sharepoint site",
}, {
Value: "driveid",
Help: "Type in driveID (advanced)",
}, {
Value: "siteid",
Help: "Type in SiteID (advanced)",
}, {
Value: "path",
Help: "Sharepoint server-relative path (advanced, e.g. /teams/hr)",
}})
case "choose_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(config.Result)
case "onedrive":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/me/drives",
},
})
case "sharepoint":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root/drives",
},
})
case "driveid":
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
case "driveid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
finalDriveID: config.Result,
})
case "siteid":
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
case "siteid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: config.Result,
})
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
`)
case "url_end":
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: "/sites/" + match[1],
})
}
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: "/sites/" + siteURL,
})
case "path":
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
case "path_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: config.Result,
})
case "search":
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
case "search_end":
searchTerm := config.Result
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites?search=" + searchTerm,
}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
}
if len(sites.Sites) == 0 {
return fs.ConfigError("choose_type", fmt.Sprintf("search for %q returned no results", searchTerm))
}
return fs.ConfigChoose("search_sites", "config_site", `Select the Site you want to use`, len(sites.Sites), func(i int) (string, string) {
site := sites.Sites[i]
return site.SiteID, fmt.Sprintf("%s (%s)", site.SiteName, site.SiteURL)
})
case "search_sites":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: config.Result,
})
case "driveid_final":
finalDriveID := config.Result
// Test the driveID and get drive type
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query root for drive %q: %v", finalDriveID, err))
}
m.Set(configDriveID, finalDriveID)
m.Set(configDriveType, rootItem.ParentReference.DriveType)
return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
case "driveid_final_end":
if config.Result == "true" {
return nil, nil
}
return fs.ConfigGoto("choose_type")
}
return nil, fmt.Errorf("unknown state %q", config.State)
}
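For context, a hedged sketch of how a caller could drive a Config function like this one programmatically. It assumes only what is visible in these hunks, plus that fs.ConfigOut exposes the next state as a State field:
func runConfig(ctx context.Context, name string, m configmap.Mapper, answer func(*fs.ConfigOut) string) error {
	in := fs.ConfigIn{} // the empty State starts the flow
	for {
		out, err := Config(ctx, name, m, in)
		if err != nil {
			return err
		}
		if out == nil {
			return nil // configuration finished
		}
		// feed the answer back in at the state the backend asked for
		in = fs.ConfigIn{State: out.State, Result: answer(out)}
	}
}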
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`

View File

@@ -71,7 +71,7 @@ func init() {
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
optc := new(Options)
err := configstruct.Set(m, optc)
if err != nil {
@@ -93,15 +93,11 @@ func init() {
fs.Debugf(nil, "pcloud: got hostname %q", hostname)
return nil
}
opt := oauthutil.Options{
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
CheckAuth: checkAuth,
StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
}
err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,

View File

@@ -77,12 +77,10 @@ func init() {
Name: "premiumizeme",
Description: "premiumize.me",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: []fs.Option{{
Name: "api_key",

View File

@@ -5,7 +5,6 @@ import (
"regexp"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -60,15 +59,11 @@ func init() {
Name: "putio",
Description: "Put.io",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
opt := oauthutil.Options{
NoOffline: true,
}
err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: putioConfig,
NoOffline: true,
})
},
Options: []fs.Option{{
Name: config.ConfigEncoding,

View File

@@ -58,7 +58,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, SeaweedFS, and Tencent COS",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
@@ -91,6 +91,9 @@ func init() {
}, {
Value: "Scaleway",
Help: "Scaleway Object Storage",
}, {
Value: "SeaweedFS",
Help: "SeaweedFS S3",
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
@@ -592,6 +595,10 @@ func init() {
Value: "sgp1.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "localhost:8333",
Help: "SeaweedFS S3 localhost",
Provider: "SeaweedFS",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East endpoint",
@@ -1693,7 +1700,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
GetTier: true,
SlowModTime: true,
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject {
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)

View File

@@ -296,83 +296,86 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Config callback for 2FA
func Config(ctx context.Context, name string, m configmap.Mapper) error {
ci := fs.GetConfig(ctx)
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
serverURL, ok := m.Get(configURL)
if !ok || serverURL == "" {
// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
return errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: nrclone config reconnect <remote name>: ")
}
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
return nil, errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: rclone config reconnect <remote name>: ")
}
u, err := url.Parse(serverURL)
if err != nil {
return errors.Errorf("invalid server URL %s", serverURL)
return nil, errors.Errorf("invalid server URL %s", serverURL)
}
is2faEnabled, _ := m.Get(config2FA)
if is2faEnabled != "true" {
return errors.New("two-factor authentication is not enabled on this account")
return nil, errors.New("two-factor authentication is not enabled on this account")
}
username, _ := m.Get(configUser)
if username == "" {
return errors.New("a username is required")
return nil, errors.New("a username is required")
}
password, _ := m.Get(configPassword)
if password != "" {
password, _ = obscure.Reveal(password)
}
// Just make sure we do have a password
for password == "" {
fmt.Print("Two-factor authentication: please enter your password (it won't be saved in the configuration)\npassword> ")
password = config.ReadPassword()
}
// Create rest client for getAuthorizationToken
url := u.String()
if !strings.HasSuffix(url, "/") {
url += "/"
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
// We loop asking for a 2FA code
for {
code := ""
for code == "" {
fmt.Print("Two-factor authentication: please enter your 2FA code\n2fa code> ")
code = config.ReadLine()
switch config.State {
case "":
// Just make sure we do have a password
if password == "" {
return fs.ConfigPassword("", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
}
return fs.ConfigGoto("password")
case "password":
password = config.Result
if password == "" {
return fs.ConfigError("password", "Password can't be blank")
}
m.Set(configPassword, obscure.MustObscure(config.Result))
return fs.ConfigGoto("2fa")
case "2fa":
return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")
case "2fa_do":
code := config.Result
if code == "" {
return fs.ConfigError("2fa", "2FA codes can't be blank")
}
// Create rest client for getAuthorizationToken
url := u.String()
if !strings.HasSuffix(url, "/") {
url += "/"
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
// Time out the authentication request after 10 seconds
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
fmt.Println("Authenticating...")
token, err := getAuthorizationToken(ctx, srv, username, password, code)
if err != nil {
fmt.Printf("Authentication failed: %v\n", err)
tryAgain := strings.ToLower(config.ReadNonEmptyLine("Do you want to try again (y/n)?"))
if tryAgain != "y" && tryAgain != "yes" {
// The user is giving up, we're done here
break
}
return fs.ConfigConfirm("2fa_error", true, "config_retry", fmt.Sprintf("Authentication failed: %v\n\nTry Again?", err))
}
if token != "" {
fmt.Println("Success!")
// Let's save the token into the configuration
m.Set(configAuthToken, token)
// And delete any previous entry for password
m.Set(configPassword, "")
// And we're done here
break
if token == "" {
return fs.ConfigConfirm("2fa_error", true, "config_retry", "Authentication failed - no token returned.\n\nTry Again?")
}
// Let's save the token into the configuration
m.Set(configAuthToken, token)
// And delete any previous entry for password
m.Set(configPassword, "")
// And we're done here
return nil, nil
case "2fa_error":
if config.Result == "true" {
return fs.ConfigGoto("2fa")
}
return nil, errors.New("2fa authentication failed")
}
return nil
return nil, fmt.Errorf("unknown state %q", config.State)
}
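A note on the password handling above: it is stored obscured and revealed again on the next entry into Config. Sketched with the two helpers used in this file:
	m.Set(configPassword, obscure.MustObscure("s3cret")) // what state "password" stores
	stored, _ := m.Get(configPassword)
	plain, _ := obscure.Reveal(stored) // "s3cret", what the top of Config reads back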
// sets the AuthorizationToken up

View File

@@ -566,7 +566,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(opt.KnownHostsFile)
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
if err != nil {
return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
}
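The practical effect, sketched under the assumption that lib/env's ShellExpand performs tilde and environment variable expansion (consistent with its use here):
	hostsFile := env.ShellExpand("~/.ssh/known_hosts")
	// -> "/home/alice/.ssh/known_hosts" for user alice (illustrative)
	hostcallback, err := knownhosts.New(hostsFile)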

View File

@@ -135,7 +135,7 @@ func init() {
Name: "sharefile",
Description: "Citrix Sharefile",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
oauthConfig := newOauthConfig("")
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
@@ -151,14 +151,10 @@ func init() {
oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
return nil
}
opt := oauthutil.Options{
CheckAuth: checkAuth,
}
err := oauthutil.Config(ctx, "sharefile", name, m, oauthConfig, &opt)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
CheckAuth: checkAuth,
})
},
Options: []fs.Option{{
Name: "upload_cutoff",

View File

@@ -75,51 +75,63 @@ func init() {
Name: "sugarsync",
Description: "Sugarsync",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return errors.Wrap(err, "failed to read options")
return nil, errors.Wrap(err, "failed to read options")
}
if opt.RefreshToken != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) {
return nil
switch config.State {
case "":
if opt.RefreshToken == "" {
return fs.ConfigGoto("username")
}
}
fmt.Printf("Username (email address)> ")
username := config.ReadLine()
password := config.GetPassword("Your Sugarsync password is only required during setup and will not be stored.")
return fs.ConfigConfirm("refresh", true, "config_refresh", "Already have a token - refresh?")
case "refresh":
if config.Result == "false" {
return nil, nil
}
return fs.ConfigGoto("username")
case "username":
return fs.ConfigInput("password", "config_username", "username (email address)")
case "password":
m.Set("username", config.Result)
return fs.ConfigPassword("auth", "config_password", "Your Sugarsync password.\n\nOnly required during setup and will not be stored.")
case "auth":
username, _ := m.Get("username")
m.Set("username", "")
password := config.Result
authRequest := api.AppAuthorization{
Username: username,
Password: password,
Application: withDefault(opt.AppID, appID),
AccessKeyID: withDefault(opt.AccessKeyID, accessKeyID),
PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
}
authRequest := api.AppAuthorization{
Username: username,
Password: password,
Application: withDefault(opt.AppID, appID),
AccessKeyID: withDefault(opt.AccessKeyID, accessKeyID),
PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
}
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: "/app-authorization",
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: "/app-authorization",
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
// FIXME
//err = f.pacer.Call(func() (bool, error) {
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
// return shouldRetry(ctx, resp, err)
//})
if err != nil {
return errors.Wrap(err, "failed to get token")
// FIXME
//err = f.pacer.Call(func() (bool, error) {
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
// return shouldRetry(ctx, resp, err)
//})
if err != nil {
return nil, errors.Wrap(err, "failed to get token")
}
opt.RefreshToken = resp.Header.Get("Location")
m.Set("refresh_token", opt.RefreshToken)
return nil, nil
}
opt.RefreshToken = resp.Header.Get("Location")
m.Set("refresh_token", opt.RefreshToken)
return nil
},
Options: []fs.Option{{
return nil, fmt.Errorf("unknown state %q", config.State)
}, Options: []fs.Option{{
Name: "app_id",
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
}, {
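The sugarsync change above is the template for the new-style config flow: instead of blocking on terminal input, `Config` is now a state machine that rclone re-enters with the previous answer in `config.Result` and the next step in `config.State`. A minimal sketch of the pattern, assuming only the `fs.Config*` helpers visible in these diffs (the backend behaviour and option names are made up):

```go
// Hypothetical Config callback: each fs.Config* helper returns a
// *fs.ConfigOut naming the state to resume in; rclone calls back with
// the user's answer in config.Result.
func exampleConfig(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	switch config.State {
	case "":
		// first entry: ask a yes/no question, resume in state "token"
		return fs.ConfigConfirm("token", true, "config_example", "Fetch a new token?")
	case "token":
		if config.Result == "false" {
			return nil, nil // a nil ConfigOut ends the config session
		}
		m.Set("token", "...") // hypothetical: fetch and store a token here
		return nil, nil
	}
	return nil, fmt.Errorf("unknown state %q", config.State)
}
```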

View File

@@ -41,19 +41,19 @@ func init() {
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(ctx context.Context, name string, configMapper configmap.Mapper) error {
provider, _ := configMapper.Get(fs.ConfigProvider)
Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
provider, _ := m.Get(fs.ConfigProvider)
config.FileDeleteKey(name, fs.ConfigProvider)
if provider == newProvider {
satelliteString, _ := configMapper.Get("satellite_address")
apiKey, _ := configMapper.Get("api_key")
passphrase, _ := configMapper.Get("passphrase")
satelliteString, _ := m.Get("satellite_address")
apiKey, _ := m.Get("api_key")
passphrase, _ := m.Get("passphrase")
// satelliteString always contains the default and passphrase can be empty
if apiKey == "" {
return nil
return nil, nil
}
satellite, found := satMap[satelliteString]
@@ -63,23 +63,23 @@ func init() {
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
return errors.Wrap(err, "couldn't create access grant")
return nil, errors.Wrap(err, "couldn't create access grant")
}
serializedAccess, err := access.Serialize()
if err != nil {
return errors.Wrap(err, "couldn't serialize access grant")
return nil, errors.Wrap(err, "couldn't serialize access grant")
}
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serializedAccess)
m.Set("satellite_address", satellite)
m.Set("access_grant", serializedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
return errors.Errorf("invalid provider type: %s", provider)
return nil, errors.Errorf("invalid provider type: %s", provider)
}
return nil
return nil, nil
},
Options: []fs.Option{
{

View File

@@ -113,6 +113,21 @@ func init() {
Name: config.ConfigEncoding,
Help: configEncodingHelp,
Advanced: true,
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions
The input format is a comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}},
})
}
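Since the help above leans on CSV encoding, here is a sketch of building a remote that uses the option from a configmap, much as the webdav test further down does (the URL and header values are illustrative):

```go
// Unquoted values work while they contain no commas; a value with a
// comma needs CSV quoting, e.g. `"Cookie","a=1,b=2"`.
m := configmap.Simple{
	"type":    "webdav",
	"url":     "https://example.com/dav", // illustrative
	"headers": "Cookie,name=value,Authorization,xxx",
}
// each key,value pair is then set on every request the backend makes
f, err := webdav.NewFs(context.Background(), "mywebdav", "", m)
```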
@@ -126,6 +141,7 @@ type Options struct {
BearerToken string `config:"bearer_token"`
BearerTokenCommand string `config:"bearer_token_command"`
Enc encoder.MultiEncoder `config:"encoding"`
Headers fs.CommaSepList `config:"headers"`
}
// Fs represents a remote webdav
@@ -359,6 +375,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, err
}
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
fs.Debugf(nil, "found headers: %v", opt.Headers)
rootIsDir := strings.HasSuffix(root, "/")
root = strings.Trim(root, "/")
@@ -428,6 +450,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
}
if opt.Headers != nil {
f.addHeaders(opt.Headers)
}
f.srv.SetErrorHandler(errorHandler)
err = f.setQuirks(ctx, opt.Vendor)
if err != nil {
@@ -487,6 +512,15 @@ func (f *Fs) fetchBearerToken(cmd string) (string, error) {
return stdoutString, nil
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(headers fs.CommaSepList) {
for i := 0; i < len(headers); i += 2 {
key := f.opt.Headers[i]
value := f.opt.Headers[i+1]
f.srv.SetHeader(key, value)
}
}
// fetch the bearer token and set it if successful
func (f *Fs) fetchAndSetBearerToken() error {
if f.opt.BearerTokenCommand == "" {

View File

@@ -0,0 +1,74 @@
package webdav_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/rclone/rclone/backend/webdav"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
remoteName = "TestWebDAV"
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer prepares the test server and returns a function to tidy it up afterwards.
// The headers option assertions run on each request.
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server
fileServer := http.FileServer(http.Dir(""))
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})
// Make the test server
ts := httptest.NewServer(handler)
// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "webdav",
"url": ts.URL,
// add headers to test the headers option
"headers": strings.Join(headers, ","),
}
// return a function to tidy up
return m, ts.Close
}
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate the WebDAV server
f, err := webdav.NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f, tidy
}
// TestHeaders checks the headers option - any request will do
func TestHeaders(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
// any request will do
_, err := f.Features().About(context.Background())
require.NoError(t, err)
}

View File

@@ -60,12 +60,10 @@ func init() {
Name: "yandex",
Description: "Yandex Disk",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
err := oauthutil.Config(ctx, "yandex", name, m, oauthConfig, nil)
if err != nil {
return errors.Wrap(err, "failed to configure token")
}
return nil
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,

View File

@@ -72,45 +72,97 @@ func init() {
Name: "zoho",
Description: "Zoho",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
// Need to setup region before configuring oauth
err := setupRegion(m)
if err != nil {
return err
return nil, err
}
opt := oauthutil.Options{
// No refresh token unless ApprovalForce is set
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
}
if err := oauthutil.Config(ctx, "zoho", name, m, oauthConfig, &opt); err != nil {
return errors.Wrap(err, "failed to configure token")
}
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
// its own custom type
token, err := oauthutil.GetToken(name, m)
if err != nil {
return errors.Wrap(err, "failed to read token")
}
if token.TokenType != "Zoho-oauthtoken" {
token.TokenType = "Zoho-oauthtoken"
err = oauthutil.PutToken(name, m, token, false)
getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to configure token")
return nil, nil, errors.Wrap(err, "failed to load oAuthClient")
}
authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
return authSrv, apiSrv, nil
}
if fs.GetConfig(ctx).AutoConfirm {
return nil
}
switch config.State {
case "":
return oauthutil.ConfigOut("teams", &oauthutil.Options{
OAuth2Config: oauthConfig,
// No refresh token unless ApprovalForce is set
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
})
case "teams":
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
// its own custom type
token, err := oauthutil.GetToken(name, m)
if err != nil {
return nil, errors.Wrap(err, "failed to read token")
}
if token.TokenType != "Zoho-oauthtoken" {
token.TokenType = "Zoho-oauthtoken"
err = oauthutil.PutToken(name, m, token, false)
if err != nil {
return nil, errors.Wrap(err, "failed to configure token")
}
}
if err = setupRoot(ctx, name, m); err != nil {
return errors.Wrap(err, "failed to configure root directory")
authSrv, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
// Get the user Info
opts := rest.Opts{
Method: "GET",
Path: "/oauth/user/info",
}
var user api.User
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
if err != nil {
return nil, err
}
// Get the teams
teams, err := listTeams(ctx, user.ZUID, apiSrv)
if err != nil {
return nil, err
}
return fs.ConfigChoose("workspace", "config_team_drive_id", "Team Drive ID", len(teams), func(i int) (string, string) {
team := teams[i]
return team.ID, team.Attributes.Name
})
case "workspace":
_, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
teamID := config.Result
workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
if err != nil {
return nil, err
}
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
workspace := workspaces[i]
return workspace.ID, workspace.Attributes.Name
})
case "workspace_end":
workspaceID := config.Result
m.Set(configRootID, workspaceID)
return nil, nil
}
return nil
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
Help: "Zoho region to connect to. You'll have to use the region you organization is registered in.",
Help: `Zoho region to connect to.
You'll have to use the region your organization is registered in. If
not sure use the same top level domain as you connect to in your
browser.`,
Examples: []fs.OptionExample{{
Value: "com",
Help: "United states / Global",
@@ -209,49 +261,6 @@ func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api
return workspaceList.TeamWorkspace, nil
}
func setupRoot(ctx context.Context, name string, m configmap.Mapper) error {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return errors.Wrap(err, "failed to load oAuthClient")
}
authSrv := rest.NewClient(oAuthClient).SetRoot(accountsURL)
opts := rest.Opts{
Method: "GET",
Path: "/oauth/user/info",
}
var user api.User
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
if err != nil {
return err
}
apiSrv := rest.NewClient(oAuthClient).SetRoot(rootURL)
teams, err := listTeams(ctx, user.ZUID, apiSrv)
if err != nil {
return err
}
var teamIDs, teamNames []string
for _, team := range teams {
teamIDs = append(teamIDs, team.ID)
teamNames = append(teamNames, team.Attributes.Name)
}
teamID := config.Choose("Enter a Team Drive ID", teamIDs, teamNames, true)
workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
if err != nil {
return err
}
var workspaceIDs, workspaceNames []string
for _, workspace := range workspaces {
workspaceIDs = append(workspaceIDs, workspace.ID)
workspaceNames = append(workspaceNames, workspace.Attributes.Name)
}
worksspaceID := config.Choose("Enter a Workspace ID", workspaceIDs, workspaceNames, true)
m.Set(configRootID, worksspaceID)
return nil
}
// --------------------------------------------------------------
// retryErrorCodes is a slice of error codes that we will retry

bin/config.py Executable file
View File

@@ -0,0 +1,203 @@
#!/usr/bin/env python3
"""
Test program to demonstrate the remote config interfaces in
rclone.
This program can simulate
rclone config create
rclone config update
rclone config password - NOT implemented yet
rclone authorize - NOT implemented yet
Pass the desired action as the first argument then any parameters.
This assumes passwords will be passed in the clear.
"""
import argparse
import subprocess
import json
from pprint import pprint
sep = "-"*60
def rpc(args, command, params):
"""
Run the command. This could be either over the CLI or the API.
Here we run over the API either using `rclone rc --loopback` which
is useful for making sure state is saved properly or to an
existing rclone rcd if `--rc` is used on the command line.
"""
if args.rc:
import requests
kwargs = {
"json": params,
}
if args.user:
kwargs["auth"] = (args.user, args.password)
r = requests.post('http://localhost:5572/'+command, **kwargs)
if r.status_code != 200:
raise ValueError(f"RC command failed: Error {r.status_code}: {r.text}")
return r.json()
cmd = ["rclone", "-vv", "rc", "--loopback", command, "--json", json.dumps(params)]
result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
return json.loads(result.stdout)
def parse_parameters(parameters):
"""
Parse the incoming key=value parameters into a dict
"""
d = {}
for param in parameters:
parts = param.split("=", 1)
if len(parts) != 2:
raise ValueError("bad format for parameter need name=value")
d[parts[0]] = parts[1]
return d
def ask(opt):
"""
Ask the user to enter the option
This is the user interface for asking a user a question.
If there are examples they should be presented.
"""
while True:
if opt["IsPassword"]:
print("*** Inputting a password")
print(opt['Help'])
examples = opt.get("Examples", ())
or_number = ""
if len(examples) > 0:
or_number = " or choice number"
for i, example in enumerate(examples):
print(f"{i:3} value: {example['Value']}")
print(f" help: {example['Help']}")
print(f"Enter a {opt['Type']} value{or_number}. Press Enter for the default ('{opt['DefaultStr']}')")
print(f"{opt['Name']}> ", end='')
s = input()
if s == "":
return opt["DefaultStr"]
try:
i = int(s)
if i >= 0 and i < len(examples):
return examples[i]["Value"]
except ValueError:
pass
if opt["Exclusive"]:
for example in examples:
if s == example["Value"]:
return s
# Exclusive is set but the value isn't one of the accepted
# ones so continue
print("Value isn't one of the acceptable values")
else:
return s
return s
def create_or_update(what, args):
"""
Run the equivalent of rclone config create
or rclone config update
what should either be "create" or "update"
"""
print(what, args)
params = parse_parameters(args.parameters)
inp = {
"name": args.name,
"parameters": params,
"opt": {
"nonInteractive": True,
"all": args.all,
"noObscure": args.obscured_passwords,
"obscure": not args.obscured_passwords,
},
}
if what == "create":
inp["type"] = args.type
while True:
print(sep)
print("Input to API")
pprint(inp)
print(sep)
out = rpc(args, "config/"+what, inp)
print(sep)
print("Output from API")
pprint(out)
print(sep)
if out["State"] == "":
return
if out["Error"]:
print("Error", out["Error"])
result = ask(out["Option"])
inp["opt"]["state"] = out["State"]
inp["opt"]["result"] = result
inp["opt"]["continue"] = True
def create(args):
"""Run the equivalent of rclone config create"""
create_or_update("create", args)
def update(args):
"""Run the equivalent of rclone config update"""
create_or_update("update", args)
def password(args):
"""Run the equivalent of rclone config password"""
print("password", args)
raise NotImplementedError()
def authorize(args):
"""Run the equivalent of rclone authorize"""
print("authorize", args)
raise NotImplementedError()
def main():
"""
Make the command line parser and dispatch
"""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("-a", "--all", action='store_true',
help="Ask all the config questions if set")
parser.add_argument("-o", "--obscured-passwords", action='store_true',
help="If set assume the passwords are obscured")
parser.add_argument("--rc", action='store_true',
help="If set use the rc (you'll need to start an rclone rcd)")
parser.add_argument("--user", type=str, default="",
help="Username for use with --rc")
parser.add_argument("--pass", type=str, default="", dest='password',
help="Password for use with --rc")
subparsers = parser.add_subparsers(dest='command', required=True)
subparser = subparsers.add_parser('create')
subparser.add_argument("name", type=str, help="Name of remote to create")
subparser.add_argument("type", type=str, help="Type of remote to create")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=create)
subparser = subparsers.add_parser('update')
subparser.add_argument("name", type=str, help="Name of remote to update")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=update)
subparser = subparsers.add_parser('password')
subparser.add_argument("name", type=str, help="Name of remote to update")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=password)
subparser = subparsers.add_parser('authorize')
subparser.set_defaults(func=authorize)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
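For reference, the script is driven like the commands it simulates, e.g. (remote name and parameters are illustrative):

    ./bin/config.py create mydrive drive scope=drive
    ./bin/config.py --rc update mydrive scope=drive.readonly

where `--rc` targets a running `rclone rcd` instead of the default `rclone rc --loopback`.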

View File

@@ -18,7 +18,6 @@ import (
_ "github.com/rclone/rclone/cmd/copyurl"
_ "github.com/rclone/rclone/cmd/cryptcheck"
_ "github.com/rclone/rclone/cmd/cryptdecode"
_ "github.com/rclone/rclone/cmd/dbhashsum"
_ "github.com/rclone/rclone/cmd/dedupe"
_ "github.com/rclone/rclone/cmd/delete"
_ "github.com/rclone/rclone/cmd/deletefile"

View File

@@ -9,6 +9,7 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -156,6 +157,12 @@ to check all the data.
if download {
return operations.CheckDownload(context.Background(), opt)
}
hashType := fsrc.Hashes().Overlap(fdst.Hashes()).GetOne()
if hashType == hash.None {
fs.Errorf(nil, "No common hash found - not using a hash for checks")
} else {
fs.Infof(nil, "Using %v for hash comparisons", hashType)
}
return operations.Check(context.Background(), opt)
})
},

View File

@@ -105,12 +105,14 @@ var configProvidersCommand = &cobra.Command{
},
}
var (
configObscure bool
configNoObscure bool
)
var updateRemoteOpt config.UpdateRemoteOpt
var configPasswordHelp = strings.ReplaceAll(`
Note that if the config process would normally ask a question the
default is taken (unless |--non-interactive| is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
const configPasswordHelp = `
If any of the parameters passed is a password field, then rclone will
automatically obscure them if they aren't already obscured before
putting them in the config file.
@@ -119,84 +121,170 @@ putting them in the config file.
consists only of base64 characters then rclone can get confused about
whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the "--obscure" flag, or if you
the passwords get obscured then use the |--obscure| flag, or if you
are 100% certain you are already passing obscured passwords then use
"--no-obscure". You can also set obscured passwords using the
"rclone config password" command.
`
|--no-obscure|. You can also set obscured passwords using the
|rclone config password| command.
The flag |--non-interactive| is for use by applications that wish to
configure rclone themselves, rather than using rclone's text based
configuration questions. If this flag is set, and rclone needs to ask
the user a question, a JSON blob will be returned with the question in
it.
This will look something like (some irrelevant detail removed):
|||
{
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
}
|||
The format of |Option| is the same as returned by |rclone config
providers|. The question should be asked to the user and returned to
rclone as the |--result| option along with the |--state| parameter.
The keys of |Option| are used as follows:
- |Name| - name of variable - show to user
- |Help| - help text. Hard wrapped at 80 chars. Any URLs should be clicky.
- |Default| - default value - return this if the user just wants the default.
- |Examples| - the user should be able to choose one of these
- |Required| - the value should be non-empty
- |IsPassword| - the value is a password and should be edited as such
- |Type| - type of value, eg |bool|, |string|, |int| and others
- |Exclusive| - if set no free-form entry allowed only the |Examples|
- Irrelevant keys |Provider|, |ShortOpt|, |Hide|, |NoPrefix|, |Advanced|
If |Error| is set then it should be shown to the user at the same
time as the question.
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
Note that when using |--continue| all passwords should be passed in
the clear (not obscured). Any default config values should be passed
in with each invocation of |--continue|.
At the end of the non interactive process, rclone will return a result
with |State| as empty string.
If |--all| is passed then rclone will ask all the config questions,
not just the post config questions. Any parameters are used as
defaults for questions as usual.
Note that |bin/config.py| in the rclone source implements this protocol
as a readable demonstration.
`, "|", "`")
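To make the round trip concrete, here is a hedged Go sketch of the loop an application might run against the CLI described above (`askUser` is a hypothetical UI helper; `bin/config.py` implements the same loop over the rc API):

```go
// Sketch only: re-invoke rclone with --continue until State comes back empty.
type configOut struct {
	State  string
	Option json.RawMessage
	Error  string
}

func configure(name, typ string) error {
	out, err := exec.Command("rclone", "config", "create", name, typ, "--non-interactive").Output()
	for {
		if err != nil {
			return err
		}
		var co configOut
		if err := json.Unmarshal(out, &co); err != nil {
			return err
		}
		if co.State == "" {
			return nil // configuration finished
		}
		result := askUser(co.Option, co.Error) // show the question, read the answer
		out, err = exec.Command("rclone", "config", "update", name,
			"--continue", "--state", co.State, "--result", result).Output()
	}
}
```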
var configCreateCommand = &cobra.Command{
Use: "create `name` `type` [`key` `value`]*",
Short: `Create a new remote with name, type and options.`,
Long: `
Create a new remote of ` + "`name`" + ` with ` + "`type`" + ` and options. The options
should be passed in pairs of ` + "`key` `value`" + `.
Long: strings.ReplaceAll(`
Create a new remote of |name| with |type| and options. The options
should be passed in pairs of |key| |value| or as |key=value|.
For example to make a swift remote of name myremote using auto config
you would do:
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
Note that if the config process would normally ask a question the
default is taken. Each time that happens rclone will print a message
saying how to affect the value taken.
` + configPasswordHelp + `
So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:
rclone config create mydrive drive config_is_local false
`,
rclone config create mydrive drive config_is_local=false
`, "|", "`") + configPasswordHelp,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 256, command, args)
in, err := argsToMap(args[2:])
if err != nil {
return err
}
err = config.CreateRemote(context.Background(), args[0], args[1], in, configObscure, configNoObscure)
return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
return config.CreateRemote(context.Background(), args[0], args[1], in, opts)
})
},
}
func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.ConfigOut, error)) error {
out, err := do(updateRemoteOpt)
if err != nil {
return err
}
if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) {
config.ShowRemote(name)
} else {
if out == nil {
out = &fs.ConfigOut{}
}
outBytes, err := json.MarshalIndent(out, "", "\t")
if err != nil {
return err
}
config.ShowRemote(args[0])
return nil
},
_, _ = os.Stdout.Write(outBytes)
_, _ = os.Stdout.WriteString("\n")
}
return nil
}
func init() {
for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
flags.BoolVarP(cmdFlags, &configObscure, "obscure", "", false, "Force any passwords to be obscured.")
flags.BoolVarP(cmdFlags, &configNoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions.")
flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue.")
flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue.")
}
}
var configUpdateCommand = &cobra.Command{
Use: "update `name` [`key` `value`]+",
Short: `Update options in an existing remote.`,
Long: `
Long: strings.ReplaceAll(`
Update an existing remote's options. The options should be passed in
in pairs of ` + "`key` `value`" + `.
pairs of |key| |value| or as |key=value|.
For example to update the env_auth field of a remote of name myremote
you would do:
rclone config update myremote swift env_auth true
` + configPasswordHelp + `
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:
rclone config update myremote swift env_auth true config_refresh_token false
`,
rclone config update myremote env_auth=true config_refresh_token=false
`, "|", "`") + configPasswordHelp,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 256, command, args)
cmd.CheckArgs(1, 256, command, args)
in, err := argsToMap(args[1:])
if err != nil {
return err
}
err = config.UpdateRemote(context.Background(), args[0], in, configObscure, configNoObscure)
if err != nil {
return err
}
config.ShowRemote(args[0])
return nil
return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
return config.UpdateRemote(context.Background(), args[0], in, opts)
})
},
}
@@ -212,19 +300,21 @@ var configDeleteCommand = &cobra.Command{
var configPasswordCommand = &cobra.Command{
Use: "password `name` [`key` `value`]+",
Short: `Update password in an existing remote.`,
Long: `
Long: strings.ReplaceAll(`
Update an existing remote's password. The password
should be passed in pairs of ` + "`key` `value`" + `.
should be passed in pairs of |key| |password| or as |key=password|.
The |password| should be passed in the clear (unobscured).
For example to set password of a remote of name myremote you would do:
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`,
`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 256, command, args)
cmd.CheckArgs(1, 256, command, args)
in, err := argsToMap(args[1:])
if err != nil {
return err
@@ -238,16 +328,24 @@ both support obscuring passwords directly.
},
}
// This takes a list of arguments in key value key value form and
// converts it into a map
// This takes a list of arguments in key value key value form, or
// key=value key=value form and converts it into a map
func argsToMap(args []string) (out rc.Params, err error) {
if len(args)%2 != 0 {
return nil, errors.New("found key without value")
}
out = rc.Params{}
// Set the config
for i := 0; i < len(args); i += 2 {
out[args[i]] = args[i+1]
for i := 0; i < len(args); i++ {
key := args[i]
equals := strings.IndexRune(key, '=')
var value string
if equals >= 0 {
key, value = key[:equals], key[equals+1:]
} else {
i++
if i >= len(args) {
return nil, errors.New("found key without value")
}
value = args[i]
}
out[key] = value
}
return out, nil
}
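For illustration, the two argument forms can now be mixed; a quick sketch agreeing with the test cases in the next file:

```go
in, err := argsToMap([]string{"env_auth=true", "user", "joe"})
// err == nil, in == rc.Params{"env_auth": "true", "user": "joe"}
```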
@@ -265,14 +363,11 @@ This normally means going through the interactive oauth flow again.
RunE: func(command *cobra.Command, args []string) error {
ctx := context.Background()
cmd.CheckArgs(1, 1, command, args)
fsInfo, configName, _, config, err := fs.ConfigFs(args[0])
fsInfo, configName, _, m, err := fs.ConfigFs(args[0])
if err != nil {
return err
}
if fsInfo.Config == nil {
return errors.Errorf("%s: doesn't support Reconnect", configName)
}
return fsInfo.Config(ctx, configName, config)
return config.PostConfig(ctx, configName, m, fsInfo)
},
}

cmd/config/config_test.go Normal file
View File

@@ -0,0 +1,59 @@
package config
import (
"fmt"
"testing"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
)
func TestArgsToMap(t *testing.T) {
for _, test := range []struct {
args []string
want rc.Params
wantErr bool
}{
{
args: []string{},
want: rc.Params{},
},
{
args: []string{"hello", "42"},
want: rc.Params{"hello": "42"},
},
{
args: []string{"hello", "42", "bye", "43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello=42", "bye", "43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello", "42", "bye=43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello=42", "bye=43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello", "42", "bye", "43", "unused"},
wantErr: true,
},
{
args: []string{"hello=42", "bye=43", "unused"},
wantErr: true,
},
} {
what := fmt.Sprintf("args = %#v", test.args)
got, err := argsToMap(test.args)
if test.wantErr {
assert.Error(t, err, what)
} else {
assert.NoError(t, err, what)
assert.Equal(t, test.want, got, what)
}
}
}

View File

@@ -37,8 +37,8 @@ Download a URL's content and copy it to the destination without saving
it in temporary storage.
Setting ` + "`--auto-filename`" + ` will cause the file name to be retrieved from
the from URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resuling file name will
the URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resulting file name will
be printed.
Setting ` + "`--no-clobber`" + ` will prevent overwriting file on the

View File

@@ -1,51 +0,0 @@
package dbhashsum
import (
"context"
"github.com/rclone/rclone/backend/dropbox"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/hashsum"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
hashsum.AddHashFlags(cmdFlags)
}
var commandDefinition = &cobra.Command{
Use: "dbhashsum remote:path",
Short: `Produces a Dropbox hash file for all the objects in the path.`,
Long: `
Produces a Dropbox hash file for all the objects in the path. The
hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
By default, the hash is requested from the remote. If Dropbox hash is
not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling Dropbox hash for any remote.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
fs.Logf(nil, `"rclone dbhashsum" is deprecated, use "rclone hashsum %v %s" instead`, dropbox.DbHashType, args[0])
cmd.Run(false, false, command, func() error {
if hashsum.HashsumOutfile == "" {
return operations.HashLister(context.Background(), dropbox.DbHashType, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil)
}
output, close, err := hashsum.GetHashsumOutput(hashsum.HashsumOutfile)
if err != nil {
return err
}
defer close()
return operations.HashLister(context.Background(), dropbox.DbHashType, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output)
})
},
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"os"
"strings"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
@@ -15,7 +16,7 @@ import (
"github.com/spf13/pflag"
)
// Global hashsum flags for reuse in md5sum, sha1sum, and dbhashsum
// Global hashsum flags for reuse in hashsum, md5sum, sha1sum
var (
OutputBase64 = false
DownloadFlag = false
@@ -28,7 +29,7 @@ func init() {
AddHashFlags(cmdFlags)
}
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum, and dbhashsum
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
func AddHashFlags(cmdFlags *pflag.FlagSet) {
flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum")
flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal")
@@ -69,23 +70,17 @@ hashed locally enabling any hash for any remote.
Run without a hash to see the list of all supported hashes, e.g.
$ rclone hashsum
Supported hashes are:
* MD5
* SHA-1
* DropboxHash
* QuickXorHash
` + hashListHelp(" ") + `
Then
$ rclone hashsum MD5 remote:path
Note that hash names are case insensitive.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 2, command, args)
if len(args) == 0 {
fmt.Printf("Supported hashes are:\n")
for _, ht := range hash.Supported().Array() {
fmt.Printf(" * %v\n", ht)
}
fmt.Print(hashListHelp(""))
return nil
} else if len(args) == 1 {
return errors.New("need hash type and remote")
@@ -93,6 +88,7 @@ Then
var ht hash.Type
err := ht.Set(args[0])
if err != nil {
fmt.Println(hashListHelp(""))
return err
}
fsrc := cmd.NewFsSrc(args[1:])
@@ -111,3 +107,14 @@ Then
return nil
},
}
func hashListHelp(indent string) string {
var help strings.Builder
help.WriteString(indent)
help.WriteString("Supported hashes are:\n")
for _, ht := range hash.Supported().Array() {
help.WriteString(indent)
fmt.Fprintf(&help, " * %v\n", ht.String())
}
return help.String()
}

View File

@@ -337,8 +337,8 @@ func makeRandomExeName(baseName, extension string) (string, error) {
func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, packageFormat string) error {
osName := runtime.GOOS
arch := runtime.GOARCH
if arch == "darwin" {
arch = "osx"
if osName == "darwin" {
osName = "osx"
}
archiveFilename := fmt.Sprintf("rclone-%s-%s-%s.%s", version, osName, arch, packageFormat)

View File

@@ -0,0 +1,94 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go
package data
import (
"html/template"
"io/ioutil"
"time"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
)
// Help describes the options for the serve package
var Help = `--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:
| Parameter | Description |
| :---------- | :---------- |
| .Name | The full path of a file/directory. |
| .Title | Directory listing of .Name |
| .Sort | The current sort used. This is changeable via ?sort= parameter |
| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
| .Order | The current ordering used. This is changeable via ?order= parameter |
| | Order Options: asc,desc (default asc) |
| .Query | Currently unused. |
| .Breadcrumb | Allows for creating a relative navigation |
|-- .Link | The relative to the root link of the Text. |
|-- .Text | The Name of the directory. |
| .Entries | Information about a specific file/directory. |
|-- .URL | The 'url' of an entry. |
|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. |
|-- .IsDir | Boolean for if an entry is a directory or not. |
|-- .Size | Size in Bytes of the entry. |
|-- .ModTime | The UTC timestamp of an entry. |
`
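As a worked example of the table above, a minimal custom template might look like this (the layout is purely illustrative; only field names exported above are used):

```go
// a fragment suitable for --template
const exampleTemplate = `<html><body>
<h1>{{ .Title }}</h1>
{{ range .Breadcrumb }}<a href="{{ .Link }}">{{ .Text }}</a> / {{ end }}
<ul>
{{ range .Entries }}<li><a href="{{ .URL }}">{{ .Leaf }}</a> {{ .Size }} bytes</li>{{ end }}
</ul>
</body></html>`
```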
// Options for the templating functionality
type Options struct {
Template string
}
// AddFlags for the templating functionality
func AddFlags(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User Specified Template.")
}
// AfterEpoch returns the time since the epoch for the given time
func AfterEpoch(t time.Time) bool {
return t.After(time.Time{})
}
// GetTemplate returns the HTML template for serving directories via HTTP/Webdav
func GetTemplate(tmpl string) (tpl *template.Template, err error) {
var templateString string
if tmpl == "" {
templateFile, err := Assets.Open("index.html")
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
return nil, errors.Wrap(err, "get template read")
}
templateString = string(templateBytes)
} else {
templateFile, err := ioutil.ReadFile(tmpl)
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
templateString = string(templateFile)
}
funcMap := template.FuncMap{
"afterEpoch": AfterEpoch,
}
tpl, err = template.New("index").Funcs(funcMap).Parse(templateString)
if err != nil {
return nil, errors.Wrap(err, "get template parse")
}
return
}

View File

@@ -1,7 +1,9 @@
package http
import (
"html/template"
"io"
"log"
"net/http"
"os"
"path"
@@ -9,19 +11,35 @@ import (
"strings"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/cmd/serve/http/data"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/auth"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
// Options required for http server
type Options struct {
data.Options
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{}
// Opt is options set by command line flags
var Opt = DefaultOpt
func init() {
httpflags.AddFlags(Command.Flags())
data.AddFlags(Command.Flags(), "", &Opt.Options)
httplib.AddFlags(Command.Flags())
auth.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
}
@@ -40,17 +58,17 @@ The server will log errors. Use -v to see access logs.
--bwlimit will be respected for file transfers. Use --stats to
control the stats printing.
` + httplib.Help + vfs.Help,
` + httplib.Help + data.Help + auth.Help + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
cmd.Run(false, true, command, func() error {
s := newServer(f, &httpflags.Opt)
err := s.Serve()
s := newServer(f, Opt.Template)
router, err := httplib.Router()
if err != nil {
return err
}
s.Wait()
s.Bind(router)
return nil
})
},
@@ -58,49 +76,37 @@ control the stats printing.
// server contains everything to run the server
type server struct {
*httplib.Server
f fs.Fs
vfs *vfs.VFS
f fs.Fs
vfs *vfs.VFS
HTMLTemplate *template.Template // HTML template for web interface
}
func newServer(f fs.Fs, opt *httplib.Options) *server {
mux := http.NewServeMux()
s := &server{
Server: httplib.NewServer(mux, opt),
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
func newServer(f fs.Fs, templatePath string) *server {
htmlTemplate, templateErr := data.GetTemplate(templatePath)
if templateErr != nil {
log.Fatalf(templateErr.Error())
}
s := &server{
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
HTMLTemplate: htmlTemplate,
}
mux.HandleFunc(s.Opt.BaseURL+"/", s.handler)
return s
}
// Serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shutdown server
func (s *server) Serve() error {
err := s.Server.Serve()
if err != nil {
return err
}
fs.Logf(s.f, "Serving on %s", s.URL())
return nil
func (s *server) Bind(router chi.Router) {
router.Use(
middleware.SetHeader("Accept-Ranges", "bytes"),
middleware.SetHeader("Server", "rclone/"+fs.Version),
)
router.Get("/*", s.handler)
router.Head("/*", s.handler)
}
// handler reads incoming requests and dispatches them
func (s *server) handler(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Server", "rclone/"+fs.Version)
urlPath, ok := s.Path(w, r)
if !ok {
return
}
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
isDir := strings.HasSuffix(r.URL.Path, "/")
remote := strings.Trim(r.URL.Path, "/")
if isDir {
s.serveDir(w, r, remote)
} else {

View File

@@ -10,10 +10,10 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/filter"
httplib "github.com/rclone/rclone/lib/http"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -32,10 +32,13 @@ const (
func startServer(t *testing.T, f fs.Fs) {
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
opt.Template = testTemplate
httpServer = newServer(f, &opt)
assert.NoError(t, httpServer.Serve())
testURL = httpServer.Server.URL()
httpServer = newServer(f, testTemplate)
router, err := httplib.Router()
if err != nil {
t.Fatal(err.Error())
}
httpServer.Bind(router)
testURL = httplib.URL()
// try to connect to the test server
pause := time.Millisecond
@@ -227,6 +230,5 @@ func TestGET(t *testing.T) {
}
func TestFinalise(t *testing.T) {
httpServer.Close()
httpServer.Wait()
_ = httplib.Shutdown()
}

View File

@@ -1 +1 @@
Method not allowed
Method Not Allowed

View File

@@ -1 +1 @@
Method not allowed
Method Not Allowed

View File

@@ -1,4 +1,6 @@
// Package httplib provides common functionality for http servers
//
// Deprecated: httplib has been replaced with lib/http
package httplib
import (
@@ -17,7 +19,7 @@ import (
auth "github.com/abbot/go-http-auth"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/serve/httplib/serve/data"
"github.com/rclone/rclone/cmd/serve/http/data"
"github.com/rclone/rclone/fs"
)

View File

@@ -1,56 +0,0 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go
package data
import (
"html/template"
"io/ioutil"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
// AfterEpoch returns the time since the epoch for the given time
func AfterEpoch(t time.Time) bool {
return t.After(time.Time{})
}
// GetTemplate returns the HTML template for serving directories via HTTP/Webdav
func GetTemplate(tmpl string) (tpl *template.Template, err error) {
var templateString string
if tmpl == "" {
templateFile, err := Assets.Open("index.html")
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
return nil, errors.Wrap(err, "get template read")
}
templateString = string(templateBytes)
} else {
templateFile, err := ioutil.ReadFile(tmpl)
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
templateString = string(templateFile)
}
funcMap := template.FuncMap{
"afterEpoch": AfterEpoch,
}
tpl, err = template.New("index").Funcs(funcMap).Parse(templateString)
if err != nil {
return nil, errors.Wrap(err, "get template parse")
}
return
}

View File

@@ -15,13 +15,13 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
"golang.org/x/net/http2"

View File

@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"net"
"os"
"regexp"
"strings"
@@ -14,7 +15,9 @@ import (
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/terminal"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"golang.org/x/crypto/ssh"
)
@@ -225,19 +228,8 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
// Wait for either subsystem "sftp" or "exec" request
if <-isSFTP {
fs.Debugf(c.what, "Starting SFTP server")
server := sftp.NewRequestServer(channel, c.handlers)
defer func() {
err := server.Close()
if err != nil && err != io.EOF {
fs.Debugf(c.what, "Failed to close server: %v", err)
}
}()
err = server.Serve()
if err == io.EOF || err == nil {
fs.Debugf(c.what, "exited session")
} else {
fs.Errorf(c.what, "completed with error: %v", err)
if err := serveChannel(channel, c.handlers, c.what); err != nil {
fs.Errorf(c.what, "Failed to serve SFTP: %v", err)
}
} else {
var rc = uint32(0)
@@ -263,3 +255,54 @@ func (c *conn) handleChannels(chans <-chan ssh.NewChannel) {
go c.handleChannel(newChannel)
}
}
func serveChannel(rwc io.ReadWriteCloser, h sftp.Handlers, what string) error {
fs.Debugf(what, "Starting SFTP server")
server := sftp.NewRequestServer(rwc, h)
defer func() {
err := server.Close()
if err != nil && err != io.EOF {
fs.Debugf(what, "Failed to close server: %v", err)
}
}()
err := server.Serve()
if err != nil && err != io.EOF {
return errors.Wrap(err, "completed with error")
}
fs.Debugf(what, "exited session")
return nil
}
func serveStdio(f fs.Fs) error {
if terminal.IsTerminal(int(os.Stdout.Fd())) {
return errors.New("refusing to run SFTP server directly on a terminal. Please let sshd start rclone, by connecting with sftp or sshfs")
}
sshChannel := &stdioChannel{
stdin: os.Stdin,
stdout: os.Stdout,
}
handlers := newVFSHandler(vfs.New(f, &vfsflags.Opt))
return serveChannel(sshChannel, handlers, "stdio")
}
type stdioChannel struct {
stdin *os.File
stdout *os.File
}
func (c *stdioChannel) Read(data []byte) (int, error) {
return c.stdin.Read(data)
}
func (c *stdioChannel) Write(data []byte) (int, error) {
return c.stdout.Write(data)
}
func (c *stdioChannel) Close() error {
err1 := c.stdin.Close()
err2 := c.stdout.Close()
if err1 != nil {
return err1
}
return err2
}

View File

@@ -27,6 +27,7 @@ type Options struct {
User string // single username
Pass string // password for user
NoAuth bool // allow no authentication on connections
Stdio bool // serve on stdio
}
// DefaultOpt is the default values used for Options
@@ -47,6 +48,7 @@ func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication.")
flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set.")
flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", Opt.Stdio, "Run an sftp server on stdin/stdout")
}
func init() {
@@ -90,6 +92,11 @@ reachable externally then supply "--addr :2022" for example.
Note that the default of "--vfs-cache-mode off" is fine for the rclone
sftp backend, but it may not be with other SFTP clients.
If --stdio is specified, rclone will serve SFTP over stdio, which can
be used with sshd via ~/.ssh/authorized_keys, for example:
restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
` + vfs.Help + proxy.Help,
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
@@ -100,6 +107,9 @@ sftp backend, but it may not be with other SFTP clients.
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, true, command, func() error {
if Opt.Stdio {
return serveStdio(f)
}
s := newServer(context.Background(), f, &Opt)
err := s.Serve()
if err != nil {

View File

@@ -11,13 +11,13 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/errors"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"

View File

@@ -43,7 +43,6 @@ using local disk.
Virtual backends wrap local and cloud file systems to apply
[encryption](/crypt/),
[caching](/cache/),
[compression](/compress/)
[chunking](/chunker/) and
[joining](/union/).
@@ -147,6 +146,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}

View File

@@ -488,3 +488,19 @@ put them back in again.` >}}
* Kenny Parsons <kennyparsons93@gmail.com>
* Jeffrey Tolar <tolar.jeffrey@gmail.com>
* jtagcat <git-514635f7@jtag.cat>
* Tatsuya Noyori <63089076+public-tatsuya-noyori@users.noreply.github.com>
* lewisxy <lewisxy@users.noreply.github.com>
* Nolan Woods <nolan_w@sfu.ca>
* Gautam Kumar <25435568+gautamajay52@users.noreply.github.com>
* Chris Macklin <chris.macklin@10xgenomics.com>
* Antoon Prins <antoon.prins@surfsara.nl>
* Alexey Ivanov <rbtz@dropbox.com>
* sp31415t1 <33207650+sp31415t1@users.noreply.github.com>
* acsfer <carlos@reendex.com>
* Tom <tom@tom-fitzhenry.me.uk>
* Tyson Moore <tyson@tyson.me>
* database64128 <free122448@hotmail.com>
* Chris Lu <chrislusf@users.noreply.github.com>
* Reid Buzby <reid@rethink.software>
* darrenrhs <darrenrhs@gmail.com>
* Florian Penzkofer <fp@nullptr.de>

View File

@@ -3,7 +3,7 @@ title: "Cache"
description: "Rclone docs for cache remote"
---
{{< icon "fa fa-archive" >}} Cache (BETA)
{{< icon "fa fa-archive" >}} Cache (DEPRECATED)
-----------------------------------------
The `cache` remote wraps another existing remote and stores file structure

View File

@@ -12,10 +12,10 @@ Get quota information from the remote.
## Synopsis
`rclone about`prints quota information about a remote to standard
`rclone about` prints quota information about a remote to standard
output. The output is typically used, free, quota and trash contents.
E.g. Typical output from`rclone about remote:`is:
E.g. Typical output from `rclone about remote:` is:
Total: 17G
Used: 7.444G
@@ -43,7 +43,7 @@ Applying a `--full` flag to the command prints the bytes in full, e.g.
Trashed: 104857602
Other: 8849156022
A `--json`flag generates conveniently computer readable output, e.g.
A `--json` flag generates conveniently computer readable output, e.g.
{
"total": 18253611008,

View File

@@ -35,12 +35,12 @@ See the [global flags page](/flags/) for global options not listed here.
* [rclone config delete](/commands/rclone_config_delete/) - Delete an existing remote `name`.
* [rclone config disconnect](/commands/rclone_config_disconnect/) - Disconnects user from remote
* [rclone config dump](/commands/rclone_config_dump/) - Dump the config file as JSON.
* [rclone config edit](/commands/rclone_config_edit/) - Enter an interactive configuration session.
* [rclone config file](/commands/rclone_config_file/) - Show path of configuration file in use.
* [rclone config password](/commands/rclone_config_password/) - Update password in an existing remote.
* [rclone config providers](/commands/rclone_config_providers/) - List in JSON format all the providers and options.
* [rclone config reconnect](/commands/rclone_config_reconnect/) - Re-authenticates user with remote.
* [rclone config show](/commands/rclone_config_show/) - Print (decrypted) config file, or the config for a single remote.
* [rclone config touch](/commands/rclone_config_touch/) - Ensure configuration file exists.
* [rclone config update](/commands/rclone_config_update/) - Update options in an existing remote.
* [rclone config userinfo](/commands/rclone_config_userinfo/) - Prints info about logged in user of remote.

View File

@@ -13,16 +13,23 @@ Create a new remote with name, type and options.
Create a new remote of `name` with `type` and options. The options
should be passed in pairs of `key` `value`.
should be passed in pairs of `key` `value` or as `key=value`.
For example to make a swift remote of name myremote using auto config
you would do:
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:
rclone config create mydrive drive config_is_local=false
Note that if the config process would normally ask a question the
default is taken. Each time that happens rclone will print a message
saying how to affect the value taken.
default is taken (unless `--non-interactive` is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
If any of the parameters passed is a password field, then rclone will
automatically obscure them if they aren't already obscured before
@@ -32,15 +39,79 @@ putting them in the config file.
consists only of base64 characters then rclone can get confused about
whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the "--obscure" flag, or if you
the passwords get obscured then use the `--obscure` flag, or if you
are 100% certain you are already passing obscured passwords then use
"--no-obscure". You can also set obscured passwords using the
"rclone config password" command.
`--no-obscure`. You can also set obscured passwords using the
`rclone config password` command.
So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:
The flag `--non-interactive` is for use by applications that wish to
configure rclone themselves, rather than using rclone's text based
configuration questions. If this flag is set, and rclone needs to ask
the user a question, a JSON blob will be returned with the question in
it.
rclone config create mydrive drive config_is_local false
This will look something like (some irrelevant detail removed):
```
{
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
}
```
The format of `Option` is the same as returned by `rclone config
providers`. The question should be asked to the user and returned to
rclone as the `--result` option along with the `--state` parameter.
The keys of `Option` are used as follows:
- `Name` - name of variable - show to user
- `Help` - help text. Hard wrapped at 80 chars. Any URLs should be clicky.
- `Default` - default value - return this if the user just wants the default.
- `Examples` - the user should be able to choose one of these
- `Required` - the value should be non-empty
- `IsPassword` - the value is a password and should be edited as such
- `Type` - type of value, eg `bool`, `string`, `int` and others
- `Exclusive` - if set no free-form entry allowed only the `Examples`
- Irrelevant keys `Provider`, `ShortOpt`, `Hide`, `NoPrefix`, `Advanced`
If `Error` is set then it should be shown to the user at the same
time as the question.
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
Note that when using `--continue` all passwords should be passed in
the clear (not obscured). Any default config values should be passed
in with each invocation of `--continue`.
At the end of the non interactive process, rclone will return a result
with `State` as empty string.
If `--all` is passed then rclone will ask all the config questions,
not just the post config questions. Any parameters are used as
defaults for questions as usual.
Note that `bin/config.py` in the rclone source implements this protocol
as a readable demonstration.
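To make the loop concrete, here is a minimal Python sketch of an
application driving `rclone config create` through this protocol (the
`configure` and `ask` names are invented for the example; a real
application would replace `ask` with its own UI):

```
import json
import subprocess

def configure(name, remote_type, *params):
    """Drive "rclone config create" via the non-interactive protocol."""
    def run(*args):
        out = subprocess.run(
            ["rclone", "config", "create", name, remote_type, *args],
            capture_output=True, text=True, check=True).stdout
        return json.loads(out)

    def ask(option):
        return option["Default"]  # stand-in for asking the user

    reply = run(*params, "--non-interactive")
    while reply["State"] != "":  # an empty State means we are done
        if reply["Error"]:
            print(reply["Error"])  # show alongside the question
        answer = ask(reply["Option"])
        # booleans must be rendered as "true"/"false" for --result
        result = str(answer).lower() if isinstance(answer, bool) else str(answer)
        reply = run("--continue", "--state", reply["State"], "--result", result)
```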
```
@@ -50,9 +121,14 @@ rclone config create `name` `type` [`key` `value`]* [flags]
## Options
```
--all Ask the full set of config questions.
--continue Continue the configuration process with an answer.
-h, --help help for create
--no-obscure Force any passwords not to be obscured.
--non-interactive Don't interact with user and return questions.
--obscure Force any passwords to be obscured.
--result string Result - use with --continue.
--state string State - use with --continue.
```
See the [global flags page](/flags/) for global options not listed here.


@@ -9,7 +9,7 @@ url: /commands/rclone_config_edit/
Enter an interactive configuration session.
# Synopsis
Enter an interactive configuration session where you can setup new
remotes and manage existing ones. You may also set or remove a
@@ -20,7 +20,7 @@ password to protect your configuration.
rclone config edit [flags]
```
# Options
```
-h, --help help for edit
@@ -28,7 +28,7 @@ rclone config edit [flags]
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.


@@ -13,11 +13,13 @@ Update password in an existing remote.
Update an existing remote's password. The password
should be passed in pairs of `key` `password` or as `key=password`.
The `password` should be passed in the clear (unobscured).
For example to set password of a remote of name myremote you would do:
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.


@@ -0,0 +1,27 @@
---
title: "rclone config touch"
description: "Ensure configuration file exists."
slug: rclone_config_touch
url: /commands/rclone_config_touch/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/touch/ and as part of making a release run "make commanddocs"
---
# rclone config touch
Ensure configuration file exists.
```
rclone config touch [flags]
```
## Options
```
-h, --help help for touch
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session.


@@ -13,12 +13,23 @@ Update options in an existing remote.
Update an existing remote's options. The options should be passed in
pairs of `key` `value` or as `key=value`.
For example to update the env_auth field of a remote of name myremote
you would do:
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:
rclone config update myremote swift env_auth=true config_refresh_token=false
Note that if the config process would normally ask a question the
default is taken (unless `--non-interactive` is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
If any of the parameters passed is a password field, then rclone will
automatically obscure them if they aren't already obscured before
@@ -28,15 +39,79 @@ putting them in the config file.
consists only of base64 characters then rclone can get confused about
whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the `--obscure` flag, or if you
are 100% certain you are already passing obscured passwords then use
`--no-obscure`. You can also set obscured passwords using the
`rclone config password` command.
The flag `--non-interactive` is for use by applications that wish to
configure rclone themselves, rather than using rclone's text based
configuration questions. If this flag is set, and rclone needs to ask
the user a question, a JSON blob will be returned with the question in
it.
This will look something like (some irrelevant detail removed):
```
{
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
}
```
The format of `Option` is the same as returned by `rclone config
providers`. The question should be asked to the user and returned to
rclone as the `--result` option along with the `--state` parameter.
The keys of `Option` are used as follows:
- `Name` - name of variable - show to user
- `Help` - help text. Hard wrapped at 80 chars. Any URLs should be clicky.
- `Default` - default value - return this if the user just wants the default.
- `Examples` - the user should be able to choose one of these
- `Required` - the value should be non-empty
- `IsPassword` - the value is a password and should be edited as such
- `Type` - type of value, eg `bool`, `string`, `int` and others
- `Exclusive` - if set, no free-form entry is allowed, only the `Examples`
- Irrelevant keys `Provider`, `ShortOpt`, `Hide`, `NoPrefix`, `Advanced`
If `Error` is set then it should be shown to the user at the same
time as the question.
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
Note that when using `--continue` all passwords should be passed in
the clear (not obscured). Any default config values should be passed
in with each invocation of `--continue`.
At the end of the non interactive process, rclone will return a result
with `State` as empty string.
If `--all` is passed then rclone will ask all the config questions,
not just the post config questions. Any parameters are used as
defaults for questions as usual.
Note that `bin/config.py` in the rclone source implements this protocol
as a readable demonstration.
```
@@ -46,9 +121,14 @@ rclone config update `name` [`key` `value`]+ [flags]
## Options
```
--all Ask the full set of config questions.
--continue Continue the configuration process with an answer.
-h, --help help for update
--no-obscure Force any passwords not to be obscured.
--non-interactive Don't interact with user and return questions.
--obscure Force any passwords to be obscured.
--result string Result - use with --continue.
--state string State - use with --continue.
```
See the [global flags page](/flags/) for global options not listed here.


@@ -15,9 +15,9 @@ Copy url content to dest.
Download a URL's content and copy it to the destination without saving
it in temporary storage.
Setting `--auto-filename` will cause the file name to be retrieved from
the URL (after any redirections) and used in the destination
path. With `--print-filename` in addition, the resulting file name will
be printed.
Setting `--no-clobber` will prevent overwriting file on the


@@ -23,8 +23,8 @@ If you supply the `--rmdirs` flag, it will remove all empty directories along wi
You can also use the separate command `rmdir` or `rmdirs` to
delete empty directories only.
For example, to delete all files bigger than 100 MiB, you may first want to
check what would be deleted (use either):
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
@@ -34,7 +34,7 @@ Then proceed with the actual delete:
rclone --min-size 100M delete remote:path
That reads "delete everything with a minimum size of 100 MiB", hence
delete all files bigger than 100 MiB.
**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.


@@ -25,15 +25,20 @@ Run without a hash to see the list of all supported hashes, e.g.
$ rclone hashsum
Supported hashes are:
* md5
* sha1
* whirlpool
* crc32
* dropbox
* mailru
* quickxor
Then
$ rclone hashsum MD5 remote:path
Note that hash names are case insensitive.
```
rclone hashsum <hash> remote:path [flags]


@@ -41,7 +41,7 @@ rclone link remote:path [flags]
## Options
```
--expire Duration The amount of time that the link will be valid (default off)
-h, --help help for link
--unlink Remove existing public link to file/folder
```


@@ -143,7 +143,7 @@ rclone lsf remote:path [flags]
--dirs-only Only list directories.
--files-only Only list files.
-F, --format string Output format - see help for details (default "p")
--hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
-h, --help help for lsf
-R, --recursive Recurse into the listing.
-s, --separator string Separator for the items in the format. (default ";")


@@ -184,7 +184,7 @@ metadata about files like in UNIX. One case that may arise is that other program
(incorrectly) interprets this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".
WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you can work around issues such as the mentioned "unprotected private key file"
@@ -192,19 +192,38 @@ by specifying `-o FileSecurity="D:P(A;;FA;;;OW)"`, for file all access (FA) to t
### Windows caveats
Drives created as Administrator are not visible to other accounts,
not even an account that was elevated to Administrator with the
User Account Control (UAC) feature. A result of this is that if you mount
to a drive letter from a Command Prompt run as Administrator, and then try
to access the same drive from Windows Explorer (which does not run as
Administrator), you will not be able to see the mounted drive.
If you don't need to access the drive from applications running with
administrative privileges, the easiest way around this is to always
create the mount from a non-elevated command prompt.
To make mapped drives available to the user account that created them
regardless if elevated or not, there is a special Windows setting called
[linked connections](https://docs.microsoft.com/en-us/troubleshoot/windows-client/networking/mapped-drives-not-available-from-elevated-command#detail-to-configure-the-enablelinkedconnections-registry-entry)
that can be enabled.
It is also possible to make a drive mount available to everyone on the system,
by running the process creating it as the built-in SYSTEM account.
There are several ways to do this: One is to use the command-line
utility [PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec),
from Microsoft's Sysinternals suite, which has option `-s` to start
processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [`--config`](https://rclone.org/docs/#config-config-file) option.
Read more in the [install documentation](https://rclone.org/install/).
Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.
## Limitations
@@ -313,7 +332,7 @@ backend. Changes made through the mount will appear immediately or
invalidate the cache.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once
@@ -578,7 +597,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.
--gid uint32 Override the gid field set by the filesystem. Not supported on Windows. (default 1000)
-h, --help help for mount
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads. Not supported on Windows. (default 128Ki)
--network-mode Mount as remote network drive, instead of fixed disk drive. Supported on Windows only
--no-checksum Don't compare checksums on up/download.
--no-modtime Don't read/write the modification time (can speed things up).
@@ -589,14 +608,14 @@ rclone mount remote:path /path/to/mountpoint [flags]
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
--read-only Mount read-only.
--uid uint32 Override the uid field set by the filesystem. Not supported on Windows. (default 1000)
--umask int Override the permission bits set by the filesystem. Not supported on Windows. (default 18)
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.


@@ -33,7 +33,6 @@ Here are the keys - press '?' to toggle the help on and off
a toggle average size in directory
n,s,C,A sort by name,size,count,average size
d delete file/directory
Y display current path
^L refresh screen
? to toggle help on and off


@@ -30,6 +30,13 @@ must fit into RAM. The cutoff needs to be small enough to adhere
the limits of your remote, please see there. Generally speaking,
setting this cutoff too high will decrease your performance.
Use the `--size` flag to preallocate the file in advance at the remote end
and actually stream it, even if the remote backend doesn't support streaming.
`--size` should be the exact size of the input stream in bytes. If the
size of the stream differs from the `--size` passed in then the
transfer will likely fail.
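For example, to stream exactly 1 MiB from a pipe while preallocating the
file at the remote (an illustrative invocation; the value given to
`--size` must match the length of the stream exactly):

    head -c 1048576 /dev/urandom | rclone rcat --size 1048576 remote:tmp/random.bin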
Note that the upload cannot be retried either, because the data is
not kept around until the upload succeeds. If you need to transfer
a lot of data, you're better off caching locally and then
@@ -42,7 +49,8 @@ rclone rcat remote:path [flags]
## Options
```
-h, --help help for rcat
--size int File size hint to preallocate (default -1)
```
See the [global flags page](/flags/) for global options not listed here.


@@ -37,7 +37,7 @@ If the old version contains only dots and digits (for example `v1.54.0`)
then it's a stable release so you won't need the `--beta` flag. Beta releases
have an additional information similar to `v1.54.0-beta.5111.06f1c0c61`.
(if you are a developer and use a locally built rclone, the version number
will end with `-DEV`, you will have to rebuild it as it obviously can't
be distributed).
If you previously installed rclone via a package manager, the package may


@@ -55,7 +55,7 @@ backend. Changes made through the mount will appear immediately or
invalidate the cache.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once
@@ -319,14 +319,14 @@ rclone serve dlna remote:path [flags]
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
--read-only Mount read-only.
--uid uint32 Override the uid field set by the filesystem. Not supported on Windows. (default 1000)
--umask int Override the permission bits set by the filesystem. Not supported on Windows. (default 18)
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.


@@ -0,0 +1,343 @@
---
title: "rclone serve docker"
description: "Serve any remote on docker's volume plugin API."
slug: rclone_serve_docker
url: /commands/rclone_serve_docker/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/serve/docker/ and as part of making a release run "make commanddocs"
---
# rclone serve docker
Serve any remote on docker's volume plugin API.
# Synopsis
rclone serve docker implements docker's volume plugin API.
This allows docker to use rclone as a data storage mechanism for various cloud providers.
# VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects
that rclone uses into something which looks much more like a disk
filing system.
Cloud storage objects have lots of properties which aren't like disk
files - you can't extend them or write to the middle of them, so the
VFS layer has to deal with that. Because there is no one right way of
doing this there are various options explained below.
The VFS layer also implements a directory cache - this caches info
about files and directories (but not the data) in memory.
# VFS Directory Cache
Using the `--dir-cache-time` flag, you can control how long a
directory should be considered up to date and not refreshed from the
backend. Changes made through the mount will appear immediately or
invalidate the cache.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--poll-interval duration Time to wait between polling for changes.
However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once
the directory cache expires if the backend configured does not support
polling for changes. If the backend supports polling, changes will be
picked up within the polling interval.
You can send a `SIGHUP` signal to rclone for it to flush all
directory caches, regardless of how old they are. Assuming only one
rclone instance is running, you can reset the cache like this:
kill -SIGHUP $(pidof rclone)
If you configure rclone with a [remote control](/rc) then you can use
rclone rc to flush the whole directory cache:
rclone rc vfs/forget
Or individual files or directories:
rclone rc vfs/forget file=path/to/file dir=path/to/dir
# VFS File Buffering
The `--buffer-size` flag determines the amount of memory
that will be used to buffer data in advance.
Each open file will try to keep the specified amount of data in memory
at all times. The buffered data is bound to one open file and won't be
shared.
This flag is an upper limit for the used memory per open file. The
buffer will only use memory for data that is downloaded but not
yet read. If the buffer is empty, only a small amount of memory will
be used.
The maximum memory used by rclone for buffering can be up to
`--buffer-size * open files`.
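For example, with the default `--buffer-size` of 16Mi and ten files
open at once, read buffering alone may account for up to 160 MiB of memory.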
# VFS File Caching
These flags control the VFS file caching options. File caching is
necessary to make the VFS layer appear compatible with a normal file
system. It can be disabled at the cost of some compatibility.
For example you'll need to enable VFS caching if you want to read and
write simultaneously to a file. See below for more details.
Note that the VFS cache is separate from the cache backend and you may
find that you need one or the other or both.
--cache-dir string Directory rclone will use for caching.
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
If run with `-vv` rclone will print the location of the file cache. The
files are stored in the user cache file area which is OS dependent but
can be controlled with `--cache-dir` or setting the appropriate
environment variable.
The cache has 4 different modes selected by `--vfs-cache-mode`.
The higher the cache mode the more compatible rclone becomes at the
cost of using disk space.
Note that files are written back to the remote only when they are
closed and if they haven't been accessed for --vfs-write-back
seconds. If rclone is quit or dies with files that haven't been
uploaded, these will be uploaded next time rclone is run with the same
flags.
If using `--vfs-cache-max-size` note that the cache may exceed this size
for two reasons. Firstly because it is only checked every
`--vfs-cache-poll-interval`. Secondly because open files cannot be
evicted from the cache.
You **should not** run two copies of rclone using the same VFS cache
with the same or overlapping remotes if using `--vfs-cache-mode > off`.
This can potentially cause data corruption if you do. You can work
around this by giving each rclone its own cache hierarchy with
`--cache-dir`. You don't need to worry about this if the remotes in
use don't overlap.
## --vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write
directly to the remote without caching anything on disk.
This will mean some operations are not possible
* Files can't be opened for both read AND write
* Files opened for write can't be seeked
* Existing files opened for write must have O_TRUNC set
* Files open for read with O_TRUNC will be opened write only
* Files open for write only will behave as if O_TRUNC was supplied
* Open modes O_APPEND, O_TRUNC are ignored
* If an upload fails it can't be retried
## --vfs-cache-mode minimal
This is very similar to "off" except that files opened for read AND
write will be buffered to disk. This means that files opened for
write will be a lot more compatible, but uses the minimal disk space.
These operations are not possible
* Files opened for write only can't be seeked
* Existing files opened for write must have O_TRUNC set
* Files opened for write only will ignore O_APPEND, O_TRUNC
* If an upload fails it can't be retried
## --vfs-cache-mode writes
In this mode files opened for read only are still read directly from
the remote, write only and read/write files are buffered to disk
first.
This mode should support all normal file system operations.
If an upload fails it will be retried at exponentially increasing
intervals up to 1 minute.
## --vfs-cache-mode full
In this mode all reads and writes are buffered to and from disk. When
data is read from the remote this is buffered to disk as well.
In this mode the files in the cache will be sparse files and rclone
will keep track of which bits of the files it has downloaded.
So if an application only reads the start of each file, then rclone
will only buffer the start of the file. These files will appear to be
their full size in the cache, but they will be sparse files with only
the data that has been downloaded present in them.
This mode should support all normal file system operations and is
otherwise identical to --vfs-cache-mode writes.
When reading a file rclone will read --buffer-size plus
--vfs-read-ahead bytes ahead. The --buffer-size is buffered in memory
whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
# VFS Performance
These flags may be used to enable/disable features of the VFS for
performance or other reasons.
In particular S3 and Swift benefit hugely from the --no-modtime flag
(or use --use-server-modtime for a slightly different effect) as each
read of the modification time takes a transaction.
--no-checksum Don't compare checksums on up/download.
--no-modtime Don't read/write the modification time (can speed things up).
--no-seek Don't allow seeking in files.
--read-only Mount read-only.
When rclone reads files from a remote it reads them in chunks. This
means that rather than requesting the whole file rclone reads the
chunk specified. This is advantageous because some cloud providers
account for reads being all the data requested, not all the data
delivered.
Rclone will keep doubling the chunk size requested starting at
--vfs-read-chunk-size with a maximum of --vfs-read-chunk-size-limit
unless it is set to "off" in which case there will be no limit.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default "off")
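As a rough sketch of the doubling behaviour (for illustration only,
sizes in bytes; `chunk_sizes` is not part of rclone):

```
def chunk_sizes(first, limit=None):
    """Yield successive chunk sizes: start at `first` and double after
    each chunk, capping at `limit` (None models the "off" setting)."""
    size = first
    while True:
        yield size
        size = size * 2 if limit is None else min(size * 2, limit)

gen = chunk_sizes(128 * 1024 ** 2, 1024 ** 3)  # 128Mi start, 1Gi limit
print([next(gen) for _ in range(5)])
# [134217728, 268435456, 536870912, 1073741824, 1073741824]
```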
Sometimes rclone is delivered reads or writes out of order. Rather
than seeking rclone will wait a short time for the in sequence read or
write to come in. These flags only come into effect when not using an
on disk cache file.
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
When using VFS write caching (--vfs-cache-mode with value writes or full),
the global flag --transfers can be set to adjust the number of parallel uploads of
modified files from cache (the related global flag --checkers has no effect on mount).
--transfers int Number of file transfers to run in parallel. (default 4)
# VFS Case Sensitivity
Linux file systems are case-sensitive: two files can differ only
by case, and the exact case must be used when opening a file.
File systems in modern Windows are case-insensitive but case-preserving:
although existing files can be opened using any case, the exact case used
to create the file is preserved and available for programs to query.
It is not allowed for two files in the same directory to differ only by case.
Usually file systems on macOS are case-insensitive. It is possible to make macOS
file systems case-sensitive, but that is not the default.
The `--vfs-case-insensitive` mount flag controls how rclone handles these
two cases. If its value is "false", rclone passes file names to the mounted
file system as-is. If the flag is "true" (or appears without a value on
command line), rclone may perform a "fixup" as explained below.
The user may specify a file name to open/delete/rename/etc with a case
different than what is stored on mounted file system. If an argument refers
to an existing file with exactly the same name, then the case of the existing
file on the disk will be used. However, if a file name with exactly the same
name is not found but a name differing only by case exists, rclone will
transparently fixup the name. This fixup happens only when an existing file
is requested. Case sensitivity of file names created anew by rclone is
controlled by the underlying mounted file system.
Note that case sensitivity of the operating system running rclone (the target)
may differ from case sensitivity of a file system mounted by rclone (the source).
The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
# Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used.
If you need this information to be available when running `df` on the
filesystem, then pass the flag `--vfs-used-is-size` to rclone.
With this flag set, instead of relying on the backend to report this
information, rclone will scan the whole remote similar to `rclone size`
and compute the total used space itself.
_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
result is accurate. However, this is very inefficient and may cost lots of API
calls resulting in extra charges. Use it as a last resort and only with caching.
```
rclone serve docker [flags]
```
# Options
```
--allow-non-empty Allow mounting over a non-empty directory. Not supported on Windows.
--allow-other Allow access to other users. Not supported on Windows.
--allow-root Allow access to root user. Not supported on Windows.
--async-read Use asynchronous reads. Not supported on Windows. (default true)
--attr-timeout duration Time for which file/directory attributes are cached. (default 1s)
--base-dir string base directory for volumes (default "/var/lib/docker/plugins/rclone/volumes")
--daemon Run mount as a daemon (background mode). Not supported on Windows.
--daemon-timeout duration Time limit for rclone to respond to kernel. Not supported on Windows.
--debug-fuse Debug the FUSE internals - needs -v.
--default-permissions Makes kernel enforce access control based on the file mode. Not supported on Windows.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--dir-perms FileMode Directory permissions (default 0777)
--file-perms FileMode File permissions (default 0666)
--forget-state skip restoring previous state
--fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.
--gid uint32 Override the gid field set by the filesystem. Not supported on Windows. (default 1000)
-h, --help help for docker
--max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads. Not supported on Windows. (default 128Ki)
--network-mode Mount as remote network drive, instead of fixed disk drive. Supported on Windows only
--no-checksum Don't compare checksums on up/download.
--no-modtime Don't read/write the modification time (can speed things up).
--no-seek Don't allow seeking in files.
--no-spec do not write spec file
--noappledouble Ignore Apple Double (._) and .DS_Store files. Supported on OSX only. (default true)
--noapplexattr Ignore all "com.apple.*" extended attributes. Supported on OSX only.
-o, --option stringArray Option for libfuse/WinFsp. Repeat if required.
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
--read-only Mount read-only.
--socket-addr string <host:port> or absolute path (default: /run/docker/plugins/rclone.sock)
--socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem. Not supported on Windows. (default 1000)
--umask int Override the permission bits set by the filesystem. Not supported on Windows. (default 18)
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.
--vfs-write-back duration Time to writeback files after last use when using cache. (default 5s)
--vfs-write-wait duration Time to wait for in-sequence write before giving error. (default 1s)
--volname string Set the volume name. Supported on Windows and OSX only.
--write-back-cache Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used. Not supported on Windows.
```
See the [global flags page](/flags/) for global options not listed here.
# SEE ALSO
* [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol.


@@ -54,7 +54,7 @@ backend. Changes made through the mount will appear immediately or
invalidate the cache.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once
@@ -403,7 +403,7 @@ rclone serve ftp remote:path [flags]
--public-ip string Public IP address to advertise for passive connections.
--read-only Mount read-only.
--uid uint32 Override the uid field set by the filesystem. Not supported on Windows. (default 1000)
--umask int Override the permission bits set by the filesystem. Not supported on Windows. (default 18)
--user string User name for authentication. (default "anonymous")
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
@@ -411,7 +411,7 @@ rclone serve ftp remote:path [flags]
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.


@@ -26,7 +26,7 @@ control the stats printing.
## Server options
Use --addr to specify which IP address and port the server should
listen on, e.g. --addr 1.2.3.4:8000 or --addr :8080 to listen to all
IPs. By default it only listens on localhost. You can use port
:0 to let the OS choose an available port.
@@ -48,6 +48,17 @@ inserts leading and trailing "/" on --baseurl, so --baseurl "rclone",
--baseurl "/rclone" and --baseurl "/rclone/" are all treated
identically.
### SSL/TLS
By default this will serve over http. If you want you can serve over
https. You will need to supply the --cert and --key flags. If you
wish to do client side certificate validation then you will need to
supply --client-ca also.
--cert should be either a PEM encoded certificate or a concatenation
of that with the CA certificate. --key should be the PEM encoded
private key and --client-ca should be the PEM encoded client
certificate authority certificate.
--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:
@@ -92,18 +103,6 @@ The password file can be updated while rclone is running.
Use --realm to set the authentication realm.
## VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects
@@ -126,7 +125,7 @@ backend. Changes made through the mount will appear immediately or
invalidate the cache.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once
@@ -376,7 +375,7 @@ rclone serve http remote:path [flags]
## Options
```
--addr string IPaddress:Port or :Port to bind server to. (default "127.0.0.1:8080")
--baseurl string Prefix for URLs - leave blank for root.
--cert string SSL PEM key (concatenation of certificate and CA certificate)
--client-ca string Client certificate authority to verify clients with
@@ -394,12 +393,12 @@ rclone serve http remote:path [flags]
--pass string Password for authentication.
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
--read-only Mount read-only.
--realm string realm for authentication
--server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--template string User Specified Template.
--uid uint32 Override the uid field set by the filesystem. Not supported on Windows. (default 1000)
--umask int Override the permission bits set by the filesystem. Not supported on Windows. (default 18)
--user string User name for authentication.
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
@@ -407,7 +406,7 @@ rclone serve http remote:path [flags]
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.


@@ -42,6 +42,11 @@ reachable externally then supply "--addr :2022" for example.
Note that the default of "--vfs-cache-mode off" is fine for the rclone
sftp backend, but it may not be with other SFTP clients.
If --stdio is specified, rclone will serve SFTP over stdio, which can
be used with sshd via ~/.ssh/authorized_keys, for example:
restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
## VFS - Virtual File System
@@ -65,7 +70,7 @@ backend. Changes made through the mount will appear immediately or
invalidate the cache.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once
@@ -412,8 +417,9 @@ rclone serve sftp remote:path [flags]
--pass string Password for authentication.
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
--read-only Mount read-only.
--stdio Run an sftp server on stdin/stdout
--uid uint32 Override the uid field set by the filesystem. Not supported on Windows. (default 1000)
--umask int Override the permission bits set by the filesystem. Not supported on Windows. (default 18)
--user string User name for authentication.
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
@@ -421,7 +427,7 @@ rclone serve sftp remote:path [flags]
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.


@@ -134,7 +134,7 @@ backend. Changes made through the mount will appear immediately or
invalidate the cache.
--dir-cache-time duration Time to cache directory entries for. (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable. (default 1m0s)
However, changes made directly on the cloud storage by the web
interface or a different copy of rclone will only be picked up once
@@ -491,7 +491,7 @@ rclone serve webdav remote:path [flags]
--server-write-timeout duration Timeout for server writing data (default 1h0m0s)
--template string User Specified Template.
--uid uint32 Override the uid field set by the filesystem. Not supported on Windows. (default 1000)
--umask int Override the permission bits set by the filesystem. Not supported on Windows. (default 18)
--user string User name for authentication.
--vfs-cache-max-age duration Max age of objects in the cache. (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache. (default off)
@@ -499,7 +499,7 @@ rclone serve webdav remote:path [flags]
--vfs-cache-poll-interval duration Interval to poll the cache for stale objects. (default 1m0s)
--vfs-case-insensitive If a file name not found, find a case insensitive match.
--vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full.
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks. (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached. 'off' is unlimited. (default off)
--vfs-read-wait duration Time to wait for in-sequence read before seeking. (default 20ms)
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size.


@@ -34,6 +34,7 @@ See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone test changenotify](/commands/rclone_test_changenotify/) - Log any change notify requests for the remote passed in.
* [rclone test histogram](/commands/rclone_test_histogram/) - Makes a histogram of file name characters.
* [rclone test info](/commands/rclone_test_info/) - Discovers file name or other limitations for paths.
* [rclone test makefiles](/commands/rclone_test_makefiles/) - Make a random file hierarchy in <dir>


@@ -0,0 +1,28 @@
---
title: "rclone test changenotify"
description: "Log any change notify requests for the remote passed in."
slug: rclone_test_changenotify
url: /commands/rclone_test_changenotify/
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/test/changenotify/ and as part of making a release run "make commanddocs"
---
# rclone test changenotify
Log any change notify requests for the remote passed in.
```
rclone test changenotify remote: [flags]
```
## Options
```
-h, --help help for changenotify
--poll-interval duration Time to wait between polling for changes. (default 10s)
```
See the [global flags page](/flags/) for global options not listed here.
## SEE ALSO
* [rclone test](/commands/rclone_test/) - Run a test command


@@ -23,6 +23,7 @@ rclone test makefiles <dir> [flags]
--max-name-length int Maximum size of file names (default 12)
--min-file-size SizeSuffix Minimum size of file to create
--min-name-length int Minimum size of file names (default 4)
--seed int Seed for the random number generator (0 for random) (default 1)
```
See the [global flags page](/flags/) for global options not listed here.


@@ -12,13 +12,16 @@ Show the version number.
## Synopsis
Show the rclone version number, the go version, the build target
OS and architecture, the runtime OS and kernel version and bitness,
build tags and the type of executable (static or dynamic).
For example:
$ rclone version
rclone v1.55.0
- os/version: ubuntu 18.04 (64 bit)
- os/kernel: 4.15.0-136-generic (x86_64)
- os/type: linux
- os/arch: amd64
- go/version: go1.16


@@ -24,7 +24,6 @@ See the following for detailed instructions for
* [Amazon S3](/s3/)
* [Backblaze B2](/b2/)
* [Box](/box/)
* [Chunker](/chunker/) - transparently splits large files for other remotes
* [Citrix ShareFile](/sharefile/)
* [Compress](/compress/)
@@ -120,7 +119,7 @@ The main rclone commands with most used first
* [rclone mount](/commands/rclone_mount/) - Mount the remote as a mountpoint.
* [rclone moveto](/commands/rclone_moveto/) - Move file or directory from source to dest.
* [rclone obscure](/commands/rclone_obscure/) - Obscure password for use in the rclone.conf
* [rclone cryptcheck](/commands/rclone_cryptcheck/) - Check the integrity of an encrypted remote.
* [rclone about](/commands/rclone_about/) - Get quota information from the remote.
See the [commands index](/commands/) for the full list.
@@ -163,7 +162,7 @@ The syntax of the paths passed to the rclone command are as follows.
This refers to the local file system.
On Windows `\` may be used instead of `/` in local paths **only**,
non local paths must use `/`. See [local filesystem](https://rclone.org/local/#paths-on-windows)
documentation for more about Windows-specific paths.
These paths needn't start with a leading `/` - if they don't then they
@@ -787,6 +786,8 @@ rclone copy --dscp LE from:/from to:/to
```
would make the priority lower than usual internet flows.
This option has no effect on Windows (see [golang/go#42728](https://github.com/golang/go/issues/42728)).
### -n, --dry-run ###
Do a trial run with no permanent changes. Use this to see what rclone


@@ -1324,8 +1324,11 @@ then select "OAuth client ID".
7. Choose an application type of "Desktop app" if you are using a Google account or "Other" if
you are using a GSuite account and click "Create". (the default name is fine)
8. It will show you a client ID and client secret. Make a note of these.
9. Go to "Oauth consent screen" and press "Publish App"
10. Provide the noted client ID and client secret to rclone.
Be aware that, due to the "enhanced security" recently introduced by
Google, you are theoretically expected to "submit your app for verification"


@@ -100,7 +100,7 @@ Dropbox supports [its own hash
type](https://www.dropbox.com/developers/reference/content-hash) which
is checked for all transfers.
### Restricted filename characters
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|
@@ -119,6 +119,65 @@ These only get replaced if they are the last character in the name:
Invalid UTF-8 bytes will also be [replaced](/overview/#invalid-utf8),
as they can't be used in JSON strings.
### Batch mode uploads {#batch-mode}
Using batch mode uploads is very important for performance when using
the Dropbox API. See [the dropbox performance guide](https://developers.dropbox.com/dbx-performance-guide)
for more info.
There are 3 modes rclone can use for uploads.
#### --dropbox-batch-mode off
In this mode rclone will not use upload batching. This was the default
before rclone v1.55. It has the disadvantage that it is very likely to
encounter `too_many_requests` errors like this
NOTICE: too_many_requests/.: Too many requests or write operations. Trying again in 15 seconds.
When rclone receives these it has to wait for 15s or sometimes 300s
before continuing which really slows down transfers.
This will happen especially if `--transfers` is large, so this mode
isn't recommended except for compatibility or investigating problems.
#### --dropbox-batch-mode sync
In this mode rclone will batch up uploads to the size specified by
`--dropbox-batch-size` and commit them together.
Using this mode means you can use a much higher `--transfers`
parameter (32 or 64 works fine) without receiving `too_many_requests`
errors.
This mode ensures full data integrity.
Note that there may be a pause when quitting rclone while rclone
finishes up the last batch using this mode.
#### --dropbox-batch-mode async
In this mode rclone will batch up uploads to the size specified by
`--dropbox-batch-size` and commit them together.
However it will not wait for the status of the batch to be returned to
the caller. This means rclone can use a much bigger batch size (much
bigger than `--transfers`), at the cost of not being able to check the
status of the upload.
This provides the maximum possible upload speed especially with lots
of small files, however rclone can't check the file got uploaded
properly using this mode.
If you are using this mode then using "rclone check" after the
transfer completes is recommended. Or you could do an initial transfer
with `--dropbox-batch-mode async` then do a final transfer with
`--dropbox-batch-mode sync` (the default).
Note that there may be a pause when quitting rclone while rclone
finishes up the last batch using this mode.
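For example, a transfer of many small files might combine sync batching
with a raised transfer count (illustrative values only):

    rclone copy --transfers 32 --dropbox-batch-mode sync /path/to/files dropbox:backup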
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/dropbox/dropbox.go then run make backenddocs" >}}
### Standard Options
@@ -308,6 +367,8 @@ to be the same account as the Dropbox you want to access)
5. Click the button `Create App`
6. Switch to the `Permissions` tab. Enable at least the following permissions: `account_info.read`, `files.metadata.write`, `files.content.write`, `files.content.read`, `sharing.write`. The `files.metadata.read` and `sharing.read` checkboxes will be marked too. Click `Submit`
6. Find the `App key` and `App secret`. Use these values in rclone config to add a new remote or edit an existing remote.
7. Switch to the `Settings` tab. Fill `OAuth2 - Redirect URIs` as `http://localhost:53682/`
8. Find the `App key` and `App secret` values on the `Settings` tab. Use these values in rclone config to add a new remote or edit an existing remote. The `App key` setting corresponds to `client_id` in rclone config, the `App secret` corresponds to `client_secret`, as in the sketch below.
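Following the steps above, the credentials might end up in a config stanza something like this (remote name and values are placeholders):

```
[dropbox]
type = dropbox
client_id = your_app_key
client_secret = your_app_secret
```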

View File

@@ -35,26 +35,26 @@ you expect. Instead use a `--filter...` flag.
Rclone matching rules follow a glob style:
`*` matches any sequence of non-separator (`/`) characters
`**` matches any sequence of characters including `/` separators
`?` matches any single non-separator (`/`) character
`[` [ `!` ] { character-range } `]`
character class (must be non-empty)
`{` pattern-list `}`
pattern alternatives
c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
`\` c matches character c
* matches any sequence of non-separator (/) characters
** matches any sequence of characters including / separators
? matches any single non-separator (/) character
[ [ ! ] { character-range } ]
character class (must be non-empty)
{ pattern-list }
pattern alternatives
c matches character c (c != *, **, ?, \, [, {, })
\c matches reserved character c (c = *, **, ?, \, [, {, })
character-range:
c matches character c (c != `\\`, `-`, `]`)
`\` c matches character c
lo `-` hi matches character c for lo <= c <= hi
c matches character c (c != \, -, ])
\c matches reserved character c (c = \, -, ])
lo - hi matches character c for lo <= c <= hi
pattern-list:
pattern { `,` pattern }
comma-separated (without spaces) patterns
pattern { , pattern }
comma-separated (without spaces) patterns
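As a sketch of how these globs are used in practice with filter flags (remote name and paths are hypothetical):

```
# Include only jpg and png files anywhere in the tree
rclone ls remote: --include "*.{jpg,png}"

# Copy only the photos directory tree, excluding everything else
rclone copy /src remote:dst --filter "+ /photos/**" --filter "- *"
```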
character classes (see [Go regular expression reference](https://golang.org/pkg/regexp/syntax/)) include:

81
docs/content/flags.md Executable file → Normal file
View File

@@ -17,7 +17,7 @@ These flags are available for every command.
--auto-confirm If enabled, do not request console confirmation.
--backup-dir string Make backups into hierarchy based in DIR.
--bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name.
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16M)
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer. (default 16Mi)
--bwlimit BwTimetable Bandwidth limit in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable.
--bwlimit-file BwTimetable Bandwidth limit per file in KiByte/s, or use suffix B|K|M|G|T|P or a full timetable.
--ca-cert string CA certificate used to verify servers
@@ -37,7 +37,8 @@ These flags are available for every command.
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
--delete-excluded Delete files on dest excluded from sync
--disable string Disable a comma separated list of features. Use help to see a list.
--disable string Disable a comma separated list of features. Use --disable help to see a list.
--disable-http2 Disable HTTP/2 in the global transport.
-n, --dry-run Do a trial run with no permanent changes
--dscp string Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.
--dump DumpFlags List of items to dump from: headers,bodies,requests,responses,auth,filters,goroutines,openfiles
@@ -86,7 +87,7 @@ These flags are available for every command.
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
--modify-window duration Max time diff to be considered the same (default 1ns)
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size. (default 250M)
--multi-thread-cutoff SizeSuffix Use multi-thread downloads for files above this size. (default 250Mi)
--multi-thread-streams int Max number of streams to use for multi-thread downloads. (default 4)
--no-check-certificate Do not verify the server SSL certificate. Insecure.
--no-check-dest Don't check the destination, copy regardless.
@@ -136,8 +137,8 @@ These flags are available for every command.
--stats-one-line Make the stats fit on one line.
      --stats-one-line-date                  Enables --stats-one-line and adds current date/time prefix.
--stats-one-line-date-format string Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes ("). See https://golang.org/pkg/time/#Time.Format
--stats-unit string Show data rate in stats as either 'bits' or 'bytes'/s (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100k)
--stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
--streaming-upload-cutoff SizeSuffix Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends. (default 100Ki)
--suffix string Suffix to add to changed files.
--suffix-keep-extension Preserve the extension when using --suffix.
--syslog Use Syslog for logging
@@ -153,7 +154,7 @@ These flags are available for every command.
--use-json-log Use json log format.
--use-mmap Use mmap allocator (see docs).
--use-server-modtime Use server modified time instead of object metadata
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.55.0")
--user-agent string Set the user-agent to a specified string. The default is rclone/ version (default "rclone/v1.56.0-beta.5531.41f561bf2.pr-commanddocs")
-v, --verbose count Print lots more stuff (repeat for more)
```
@@ -167,7 +168,7 @@ and may be set in the config file.
--acd-client-id string OAuth Client Id
--acd-client-secret string OAuth Client Secret
--acd-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9G)
--acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink. (default 9Gi)
--acd-token string OAuth Access Token as a JSON blob.
--acd-token-url string Token server url.
--acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears. (default 3m0s)
@@ -175,7 +176,7 @@ and may be set in the config file.
--azureblob-access-tier string Access tier of blob: hot, cool or archive.
--azureblob-account string Storage Account Name (leave blank to use SAS URL or Emulator)
--azureblob-archive-tier-delete Delete archive tier blobs before overwriting.
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100 MiB). (default 4M)
--azureblob-chunk-size SizeSuffix Upload chunk size (<= 100 MiB). (default 4Mi)
--azureblob-disable-checksum Don't store MD5 checksum with object metadata.
--azureblob-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
--azureblob-endpoint string Endpoint for the service
@@ -193,8 +194,8 @@ and may be set in the config file.
--azureblob-use-emulator Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)
--azureblob-use-msi Use a managed service identity to authenticate (only works in Azure)
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96M)
--b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4G)
--b2-chunk-size SizeSuffix Upload chunk size. Must fit in memory. (default 96Mi)
--b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
--b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d. (default 1w)
--b2-download-url string Custom endpoint for downloads.
@@ -205,7 +206,7 @@ and may be set in the config file.
--b2-memory-pool-flush-time Duration How often internal memory buffer pools will be flushed. (default 1m0s)
--b2-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool.
--b2-test-mode string A flag string for X-Bz-Test-Mode header for debugging.
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200M)
--b2-upload-cutoff SizeSuffix Cutoff for switching to chunked upload. (default 200Mi)
--b2-versions Include old versions in directory listings.
--box-access-token string Box App Primary Access Token
--box-auth-url string Auth server URL.
@@ -218,12 +219,12 @@ and may be set in the config file.
--box-root-folder-id string Fill in for rclone to use a non root folder as its starting point.
--box-token string OAuth Access Token as a JSON blob.
--box-token-url string Token server url.
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB). (default 50M)
--box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB). (default 50Mi)
--cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage. (default 1m0s)
--cache-chunk-no-memory Disable the in-memory cache for storing chunks during streaming.
--cache-chunk-path string Directory to cache chunk files. (default "$HOME/.cache/rclone/cache-backend")
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5M)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10G)
--cache-chunk-size SizeSuffix The size of a chunk (partial file data). (default 5Mi)
--cache-chunk-total-size SizeSuffix The total size that the chunks can take up on the local disk. (default 10Gi)
--cache-db-path string Directory to store file structure metadata DB. (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start.
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
@@ -239,13 +240,13 @@ and may be set in the config file.
--cache-tmp-wait-time Duration How long should files be stored in local cache before being uploaded (default 15s)
--cache-workers int How many workers should run in parallel to download chunks. (default 4)
--cache-writes Cache file data on writes through the FS
--chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks. (default 2G)
--chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks. (default 2Gi)
--chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks.
--chunker-hash-type string Choose how chunker handles hash sums. All modes but "none" require metadata. (default "md5")
--chunker-remote string Remote to chunk/unchunk.
--compress-level int GZIP compression level (-2 to 9). (default -1)
--compress-mode string Compression mode. (default "gzip")
--compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size. (default 20M)
--compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size. (default 20Mi)
--compress-remote string Remote to compress.
-L, --copy-links Follow symlinks and copy the pointed to item.
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact. (default true)
@@ -260,7 +261,7 @@ and may be set in the config file.
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs (e.g. file.doc to file.docx). This will confuse sync and reupload every time.
--drive-auth-owner-only Only consider files owned by the authenticated user.
--drive-auth-url string Auth server URL.
      --drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8M)
      --drive-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 8Mi)
--drive-client-id string Google Application Client Id
--drive-client-secret string OAuth Client Secret
--drive-disable-http2 Disable drive using http2 (default true)
@@ -290,13 +291,16 @@ and may be set in the config file.
--drive-token string OAuth Access Token as a JSON blob.
--drive-token-url string Token server url.
--drive-trashed-only Only show files that are in the trash.
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8M)
--drive-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 8Mi)
      --drive-use-created-date Use file created date instead of modified date.
--drive-use-shared-date Use date file was shared instead of modified date.
--drive-use-trash Send files to the trash instead of deleting permanently. (default true)
      --drive-v2-download-min-size SizeSuffix If Objects are greater, use drive v2 API to download. (default off)
--dropbox-auth-url string Auth server URL.
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150M). (default 48M)
--dropbox-batch-mode string Upload file batching sync|async|off. (default "sync")
--dropbox-batch-size int Max number of files in upload batch.
--dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
--dropbox-chunk-size SizeSuffix Upload chunk size. (< 150Mi). (default 48Mi)
--dropbox-client-id string OAuth Client Id
--dropbox-client-secret string OAuth Client Secret
--dropbox-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
@@ -361,7 +365,7 @@ and may be set in the config file.
--http-no-slash Set this if the site doesn't end directories with /
--http-url string URL of http host to connect to
--hubic-auth-url string Auth server URL.
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--hubic-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5Gi)
--hubic-client-id string OAuth Client Id
--hubic-client-secret string OAuth Client Secret
--hubic-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8)
@@ -370,9 +374,9 @@ and may be set in the config file.
--hubic-token-url string Token server url.
--jottacloud-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash.
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10M)
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required. (default 10Mi)
--jottacloud-trashed-only Only show files that are in the trash.
      --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10M)
      --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails. (default 10Mi)
--koofr-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--koofr-endpoint string The Koofr API endpoint to use (default "https://app.koofr.net")
--koofr-mountid string Mount ID of the mount to use. If omitted, the primary mount is used.
@@ -387,16 +391,16 @@ and may be set in the config file.
--local-no-preallocate Disable preallocation of disk space for transferred files
--local-no-set-modtime Disable setting modtime
--local-no-sparse Disable sparse files for multi-thread downloads
--local-no-unicode-normalization Don't apply unicode normalization to paths and filenames (Deprecated)
--local-nounc string Disable UNC (long path names) conversion on Windows
--local-zero-size-links Assume the Stat size of links is zero (and read them instead)
--local-unicode-normalization Apply unicode NFC normalization to paths and filenames
--local-zero-size-links Assume the Stat size of links is zero (and read them instead) (Deprecated)
--mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
--mailru-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--mailru-pass string Password (obscured)
--mailru-speedup-enable Skip full upload if there is another file with same data hash. (default true)
--mailru-speedup-file-patterns string Comma separated list of file name patterns eligible for speedup (put by hash). (default "*.mkv,*.avi,*.mp4,*.mp3,*.zip,*.gz,*.rar,*.pdf")
--mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3G)
--mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk. (default 32M)
--mailru-speedup-max-disk SizeSuffix This option allows you to disable speedup (put by hash) for large files (default 3Gi)
--mailru-speedup-max-memory SizeSuffix Files larger than the size given below will always be hashed on disk. (default 32Mi)
--mailru-user string User name (usually email)
--mega-debug Output more debug from Mega.
--mega-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
@@ -405,7 +409,7 @@ and may be set in the config file.
--mega-user string User name
-x, --one-file-system Don't cross filesystem boundaries (unix/macOS only).
--onedrive-auth-url string Auth server URL.
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes). (default 10M)
--onedrive-chunk-size SizeSuffix Chunk size to upload files with - must be multiple of 320k (327,680 bytes). (default 10Mi)
--onedrive-client-id string OAuth Client Id
--onedrive-client-secret string OAuth Client Secret
--onedrive-drive-id string The ID of the drive to use
@@ -421,7 +425,7 @@ and may be set in the config file.
--onedrive-server-side-across-configs Allow server-side operations (e.g. copy) to work across different onedrive configs.
--onedrive-token string OAuth Access Token as a JSON blob.
--onedrive-token-url string Token server url.
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size. (default 10M)
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size. (default 10Mi)
--opendrive-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--opendrive-password string Password. (obscured)
--opendrive-username string Username
@@ -436,20 +440,20 @@ and may be set in the config file.
--premiumizeme-encoding MultiEncoder This sets the encoding for the backend. (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--putio-encoding MultiEncoder This sets the encoding for the backend. (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4M)
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading. (default 4Mi)
--qingstor-connection-retries int Number of connection retries. (default 3)
--qingstor-encoding MultiEncoder This sets the encoding for the backend. (default Slash,Ctl,InvalidUtf8)
      --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API.
      --qingstor-env-auth Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.
--qingstor-secret-access-key string QingStor Secret Access Key (password)
--qingstor-upload-concurrency int Concurrency for multipart uploads. (default 1)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--qingstor-zone string Zone to connect to.
--s3-access-key-id string AWS Access Key ID.
--s3-acl string Canned ACL used when creating buckets and storing or copying objects.
--s3-bucket-acl string Canned ACL used when creating buckets.
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5M)
--s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656G)
--s3-chunk-size SizeSuffix Chunk size to use for uploading. (default 5Mi)
--s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-disable-http2 Disable usage of http2 for S3 backends
--s3-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8,Dot)
@@ -464,6 +468,7 @@ and may be set in the config file.
--s3-memory-pool-use-mmap Whether to use mmap buffers in internal memory pool.
--s3-no-check-bucket If set, don't attempt to check the bucket exists or create it
--s3-no-head If set, don't HEAD uploaded objects to check integrity
--s3-no-head-object If set, don't HEAD objects
--s3-profile string Profile to use in the shared credentials file
--s3-provider string Choose your S3 provider.
--s3-region string Region to connect to.
@@ -478,7 +483,7 @@ and may be set in the config file.
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key.
--s3-storage-class string The storage class to use when storing new objects in S3.
--s3-upload-concurrency int Concurrency for multipart uploads. (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200M)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint.
--s3-v2-auth If true use v2 authentication.
--seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
@@ -491,6 +496,7 @@ and may be set in the config file.
--seafile-user string User name (usually email address)
--sftp-ask-password Allow asking for SFTP password when needed.
--sftp-disable-concurrent-reads If set don't use concurrent reads
--sftp-disable-concurrent-writes If set don't use concurrent writes
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available.
--sftp-host string SSH host to connect to
--sftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
@@ -512,11 +518,11 @@ and may be set in the config file.
--sftp-use-fstat If set use fstat instead of stat
--sftp-use-insecure-cipher Enable the use of insecure ciphers and key exchange methods.
--sftp-user string SSH username, leave blank for current username, $USER
      --sharefile-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 64M)
      --sharefile-chunk-size SizeSuffix Upload chunk size. Must be a power of 2 >= 256k. (default 64Mi)
--sharefile-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
--sharefile-endpoint string Endpoint for API calls.
--sharefile-root-folder-id string ID of the root folder
--sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload. (default 128M)
--sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload. (default 128Mi)
--skip-links Don't warn about skipped symlinks.
--sugarsync-access-key-id string Sugarsync Access Key ID.
--sugarsync-app-id string Sugarsync App ID.
@@ -535,7 +541,7 @@ and may be set in the config file.
--swift-auth string Authentication URL for server (OS_AUTH_URL).
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5G)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container. (default 5Gi)
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-encoding MultiEncoder This sets the encoding for the backend. (default Slash,InvalidUtf8)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
@@ -561,9 +567,12 @@ and may be set in the config file.
--union-create-policy string Policy to choose upstream on CREATE category. (default "epmfs")
--union-search-policy string Policy to choose upstream on SEARCH category. (default "ff")
--union-upstreams string List of space separated upstreams.
--uptobox-access-token string Your access Token, get it from https://uptobox.com/my_account
--uptobox-encoding MultiEncoder This sets the encoding for the backend. (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token
--webdav-encoding string This sets the encoding for the backend.
--webdav-headers CommaSepList Set HTTP headers for all transactions
--webdav-pass string Password. (obscured)
--webdav-url string URL of http host to connect to
--webdav-user string User name. In case NTLM authentication is used, the username should be in the format 'Domain\User'.

View File

@@ -167,7 +167,7 @@ files in the bucket.
rclone sync -i /home/local/directory remote:bucket
### Service Account support ###
### Service Account support
You can set up rclone with Google Cloud Storage in an unattended mode,
i.e. not tied to a specific end-user Google account. This is useful
@@ -194,14 +194,14 @@ the rclone config file, you can set `service_account_credentials` with
the actual contents of the file instead, or set the equivalent
environment variable.
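For example, a service-account based remote might look something like this in the config file (remote name, project number and path are placeholders):

```
[remote]
type = google cloud storage
project_number = 12345678
service_account_file = /path/to/service-account-credentials.json
```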
### Anonymous Access ###
### Anonymous Access
For downloads of objects that permit public access you can configure rclone
to use anonymous access by setting `anonymous` to `true`.
With unauthorized access you can't write or create files but only read or list
those buckets and objects that have public read access.
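A minimal sketch of such a read-only remote (remote name is a placeholder):

```
[remote]
type = google cloud storage
anonymous = true
```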
### Application Default Credentials ###
### Application Default Credentials
If no other source of credentials is provided, rclone will fall back
to
@@ -215,13 +215,13 @@ additional commands on your google compute machine -
Note that in the case application default credentials are used, there
is no need to explicitly configure a project number.
### --fast-list ###
### --fast-list
This remote supports `--fast-list` which allows you to use fewer
transactions in exchange for more memory. See the [rclone
docs](/docs/#fast-list) for more details.
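For example, to total up a bucket recursively with fewer transactions (remote and bucket names are placeholders):

```
rclone size --fast-list remote:bucket
```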
### Custom upload headers ###
### Custom upload headers
You can set custom upload headers with the `--header-upload`
flag. Google Cloud Storage supports the headers as described in the
@@ -240,13 +240,24 @@ Eg `--header-upload "Content-Type text/potato"`
Note that the last of these is for setting custom metadata in the form
`--header-upload "x-goog-meta-key: value"`
### Modified time ###
### Modification time
Google Cloud Storage stores md5sums natively and rclone stores
modification times as metadata on the object, under the "mtime" key in
RFC3339 format accurate to 1ns.
Google Cloud Storage stores md5sum natively.
Google's [gsutil](https://cloud.google.com/storage/docs/gsutil) tool stores modification time
with one-second precision as `goog-reserved-file-mtime` in file metadata.
#### Restricted filename characters
To ensure compatibility with gsutil, rclone stores modification time in 2 separate metadata entries.
`mtime` uses RFC3339 format with one-nanosecond precision.
`goog-reserved-file-mtime` uses Unix timestamp format with one-second precision.
To get modification time from object metadata, rclone reads the metadata in the following order: `mtime`, `goog-reserved-file-mtime`, object updated time.
Note that rclone's default modify window is 1ns.
Files uploaded by gsutil only contain timestamps with one-second precision.
If you use rclone to sync files previously uploaded by gsutil,
rclone will attempt to update modification time for all these files.
To avoid these possibly unnecessary updates, use `--modify-window 1s`.
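For instance, to sync against objects previously uploaded by gsutil without rewriting their timestamps (paths and remote name are placeholders):

```
rclone sync --modify-window 1s /home/local/directory remote:bucket
```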
### Restricted filename characters
| Character | Value | Replacement |
| --------- |:-----:|:-----------:|

View File

@@ -114,9 +114,9 @@ as they can't be converted to UTF-16.
### Paths on Windows ###
On Windows there are many ways of specifying a path to a file system resource.
Both absolute paths like `C:\path\to\wherever`, and relative paths like
`..\wherever` can be used, and path separator can be either
`\` (as in `C:\path\to\wherever`) or `/` (as in `C:/path/to/wherever`).
Local paths can be absolute, like `C:\path\to\wherever`, or relative,
like `..\wherever`. Network paths in UNC format, `\\server\share`, are also supported.
Path separator can be either `\` (as in `C:\path\to\wherever`) or `/` (as in `C:/path/to/wherever`).
The length of these paths is limited to 259 characters for files and 247
characters for directories, but there is an alternative extended-length
path format increasing the limit to (approximately) 32,767 characters.
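Illustrative invocations on Windows showing the accepted path forms (paths are placeholders):

```
rclone ls C:\path\to\wherever
rclone ls C:/path/to/wherever
rclone ls \\server\share\path
```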

View File

@@ -17,6 +17,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
@@ -549,7 +550,7 @@ Vault API, so rclone cannot directly access Glacier Vaults.
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/s3/s3.go then run make backenddocs" >}}
### Standard Options
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS).
Here are the standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, SeaweedFS, and Tencent COS).
#### --s3-provider
@@ -578,6 +579,8 @@ Choose your S3 provider.
- Netease Object Storage (NOS)
- "Scaleway"
- Scaleway Object Storage
- "SeaweedFS"
- SeaweedFS S3
- "StackPath"
- StackPath Object Storage
- "TencentCOS"
@@ -2268,6 +2271,56 @@ server_side_encryption =
storage_class =
```
### SeaweedFS ###
[SeaweedFS](https://github.com/chrislusf/seaweedfs/) is a distributed storage system for
blobs, objects, files, and data lake, with O(1) disk seek and a scalable file metadata store.
It has an S3 compatible object storage interface.
Assuming SeaweedFS is configured with `weed shell` as follows:
```
> s3.bucket.create -name foo
> s3.configure -access_key=any -secret_key=any -buckets=foo -user=me -actions=Read,Write,List,Tagging,Admin -apply
{
"identities": [
{
"name": "me",
"credentials": [
{
"accessKey": "any",
"secretKey": "any"
}
],
"actions": [
"Read:foo",
"Write:foo",
"List:foo",
"Tagging:foo",
"Admin:foo"
]
}
]
}
```
To use rclone with SeaweedFS, the above configuration should end up with something like this in
your rclone config:
```
[seaweedfs_s3]
type = s3
provider = SeaweedFS
access_key_id = any
secret_access_key = any
endpoint = localhost:8333
```
Once set up, you can, for example, copy files into a bucket:
```
rclone copy /path/to/files seaweedfs_s3:foo
```
### Wasabi ###
[Wasabi](https://wasabi.com) is a cloud-based object storage service for a

View File

@@ -310,3 +310,8 @@ remote.
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features)
See [rclone about](https://rclone.org/commands/rclone_about/)
### Known issues
If you get errors like `too many open files` this usually happens when the default `ulimit` for system max open files is exceeded. Native Storj protocol opens a large number of TCP connections (each of which is counted as an open file). For a single upload stream you can expect 110 TCP connections to be opened. For a single download stream you can expect 35. This batch of connections will be opened for every 64 MiB segment and you should also expect TCP connections to be reused. If you do many transfers you eventually open a connection to most storage nodes (thousands of nodes).
To fix these, please raise your system limits. You can do this by issuing a `ulimit -n 65536` just before you run rclone, inside a bash script, or in your `$HOME/.bashrc` file. If you need to change limits system-wide, this is usually configured in `/etc/sysctl.conf` and/or `/etc/security/limits.conf` on Linux, but please refer to your operating system manual.
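A sketch of the per-session workaround (remote name and path are placeholders):

```
ulimit -n 65536
rclone copy /path/to/files remote:bucket
```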

View File

@@ -198,6 +198,25 @@ Default encoding is Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Hash,Per
- Type: string
- Default: ""
#### --webdav-headers
Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions
The input format is a comma-separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
- Config: headers
- Env Var: RCLONE_WEBDAV_HEADERS
- Type: CommaSepList
- Default:
{{< rem autogenerated options stop >}}
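For example, a hypothetical listing that sends a cookie with every request (remote name and path are placeholders):

```
rclone lsf --webdav-headers "Cookie,name=value" remote:path
```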
## Provider notes ##

View File

@@ -46,7 +46,7 @@ Got code
[remote]
client_id =
client_secret =
token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","expiry":"2016-12-29T12:27:11.362788025Z"}
token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"OAuth","expiry":"2016-12-29T12:27:11.362788025Z"}
--------------------
y) Yes this is OK
e) Edit this remote

View File

@@ -67,7 +67,6 @@
<a class="dropdown-item" href="/s3/"><i class="fab fa-amazon"></i> Amazon S3</a>
<a class="dropdown-item" href="/b2/"><i class="fa fa-fire"></i> Backblaze B2</a>
<a class="dropdown-item" href="/box/"><i class="fa fa-archive"></i> Box</a>
<a class="dropdown-item" href="/cache/"><i class="fa fa-archive"></i> Cache</a>
<a class="dropdown-item" href="/chunker/"><i class="fa fa-cut"></i> Chunker (splits large files)</a>
<a class="dropdown-item" href="/compress/"><i class="fas fa-compress"></i> Compress (transparent gzip compression)</a>
<a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square"></i> Citrix ShareFile</a>

View File

@@ -18,8 +18,6 @@ import (
// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
var MaxCompletedTransfers = 100
var startTime = time.Now()
// StatsInfo accounts all transfers
type StatsInfo struct {
mu sync.RWMutex
@@ -49,6 +47,7 @@ type StatsInfo struct {
oldTimeRanges timeRanges // a merged list of time ranges for the transfers
oldDuration time.Duration // duration of transfers we have culled
group string
startTime time.Time // the moment these stats were initialized or reset
}
// NewStats creates an initialised StatsInfo
@@ -60,6 +59,7 @@ func NewStats(ctx context.Context) *StatsInfo {
checking: newTransferMap(ci.Checkers, "checking"),
transferring: newTransferMap(ci.Transfers, "transferring"),
inProgress: newInProgress(ctx),
startTime: time.Now(),
}
}
@@ -87,7 +87,7 @@ func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
out["deletes"] = s.deletes
out["deletedDirs"] = s.deletedDirs
out["renames"] = s.renames
out["elapsedTime"] = time.Since(startTime).Seconds()
out["elapsedTime"] = time.Since(s.startTime).Seconds()
eta, etaOK := eta(s.bytes, ts.totalBytes, ts.speed)
if etaOK {
out["eta"] = eta.Seconds()
@@ -298,7 +298,7 @@ func (s *StatsInfo) String() string {
buf = &bytes.Buffer{}
xfrchkString = ""
dateString = ""
elapsedTime = time.Since(startTime)
elapsedTime = time.Since(s.startTime)
elapsedTimeSecondsOnly = elapsedTime.Truncate(time.Second/10) % time.Minute
displaySpeedString string
)

504
fs/backend_config.go Normal file
View File

@@ -0,0 +1,504 @@
// Structures and utilities for backend config
//
//
package fs
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs/config/configmap"
)
const (
// ConfigToken is the key used to store the token under
ConfigToken = "token"
// ConfigKeyEphemeralPrefix marks config keys which shouldn't be stored in the config file
ConfigKeyEphemeralPrefix = "config_"
)
// ConfigOAuth should be called to do the OAuth
//
// set in lib/oauthutil to avoid a circular import
var ConfigOAuth func(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, in ConfigIn) (*ConfigOut, error)
// ConfigIn is passed to the Config function for an Fs
//
// The interactive config system for backends is state based. This is
// so that different frontends to the config can be attached, eg over
// the API or web page.
//
// Each call to the config system supplies ConfigIn which tells the
// system what to do. Each will return a ConfigOut which gives a
// question to ask the user and a state to return to. There is one
// special question which allows the backends to do OAuth.
//
// The ConfigIn contains a State which the backend should act upon and
// a Result from the previous question to the user.
//
// If ConfigOut is nil or ConfigOut.State == "" then the process is
// deemed to have finished. If there is no Option in ConfigOut then
// the next state will be called immediately. This is wrapped in
// ConfigGoto and ConfigResult.
//
// Backends should keep no state in memory - if they need to persist
// things between calls it should be persisted in the config file.
// Things can also be persisted in the state using the StatePush and
// StatePop utilities here.
//
// The utilities here are convenience methods for different kinds of
// questions and responses.
//
// Where the questions ask for a name then this should start with
// "config_" to show it is an ephemeral config input rather than the
// actual value stored in the config file. Names beginning with
// "config_fs_" are reserved for internal use.
//
// State names starting with "*" are reserved for internal use.
//
// Note that in the bin directory there is a python program called
// "config.py" which shows how this interface should be used.
type ConfigIn struct {
State string // State to run
Result string // Result from previous Option
}
// ConfigOut is returned from Config function for an Fs
//
// State is the state for the next call to Config
// OAuth is a special value set by oauthutil.ConfigOAuth
// Error is displayed to the user before asking a question
// Result is passed to the next call to Config if Option/OAuth isn't set
type ConfigOut struct {
State string // State to jump to after this
Option *Option // Option to query user about
OAuth interface{} `json:"-"` // Do OAuth if set
Error string // error to be displayed to the user
Result string // if Option/OAuth not set then this is passed to the next state
}
// ConfigInputOptional asks the user for a string which may be empty
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
func ConfigInputOptional(state string, name string, help string) (*ConfigOut, error) {
return &ConfigOut{
State: state,
Option: &Option{
Name: name,
Help: help,
Default: "",
},
}, nil
}
// ConfigInput asks the user for a non-empty string
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
func ConfigInput(state string, name string, help string) (*ConfigOut, error) {
out, _ := ConfigInputOptional(state, name, help)
out.Option.Required = true
return out, nil
}
// ConfigPassword asks the user for a password
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
func ConfigPassword(state string, name string, help string) (*ConfigOut, error) {
out, _ := ConfigInputOptional(state, name, help)
out.Option.IsPassword = true
return out, nil
}
// ConfigGoto goes to the next state with empty Result
//
// state should be the next state required
func ConfigGoto(state string) (*ConfigOut, error) {
return &ConfigOut{
State: state,
}, nil
}
// ConfigResult goes to the next state with result given
//
// state should be the next state required
// result should be the result for the next state
func ConfigResult(state, result string) (*ConfigOut, error) {
return &ConfigOut{
State: state,
Result: result,
}, nil
}
// ConfigError shows the error to the user and goes to the state passed in
//
// state should be the next state required
// Error should be the error shown to the user
func ConfigError(state string, Error string) (*ConfigOut, error) {
return &ConfigOut{
State: state,
Error: Error,
}, nil
}
// ConfigConfirm returns a ConfigOut structure which asks a Yes/No question
//
// state should be the next state required
// Default should be the default state
// name is the config name for this item
// help should be the help shown to the user
func ConfigConfirm(state string, Default bool, name string, help string) (*ConfigOut, error) {
return &ConfigOut{
State: state,
Option: &Option{
Name: name,
Help: help,
Default: Default,
Examples: []OptionExample{{
Value: "true",
Help: "Yes",
}, {
Value: "false",
Help: "No",
}},
Exclusive: true,
},
}, nil
}
// ConfigChooseFixed returns a ConfigOut structure which has a list of items to choose from.
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
// items should be the items in the list
//
// It chooses the first item to be the default.
// If there are no items then it will return an error.
// If there is only one item it will short cut to the next state
func ConfigChooseFixed(state string, name string, help string, items []OptionExample) (*ConfigOut, error) {
if len(items) == 0 {
return nil, errors.Errorf("no items found in: %s", help)
}
choose := &ConfigOut{
State: state,
Option: &Option{
Name: name,
Help: help,
Examples: items,
Exclusive: true,
},
}
choose.Option.Default = choose.Option.Examples[0].Value
if len(items) == 1 {
// short circuit asking the question if only one entry
choose.Result = choose.Option.Examples[0].Value
choose.Option = nil
}
return choose, nil
}
// ConfigChoose returns a ConfigOut structure which has a list of items to choose from.
//
// state should be the next state required
// name is the config name for this item
// help should be the help shown to the user
// n should be the number of items in the list
// getItem should return the items (value, help)
//
// It chooses the first item to be the default.
// If there are no items then it will return an error.
// If there is only one item it will short cut to the next state
func ConfigChoose(state string, name string, help string, n int, getItem func(i int) (itemValue string, itemHelp string)) (*ConfigOut, error) {
items := make(OptionExamples, n)
for i := range items {
items[i].Value, items[i].Help = getItem(i)
}
return ConfigChooseFixed(state, name, help, items)
}
// StatePush pushes new values onto the front of the config string
func StatePush(state string, values ...string) string {
for i := range values {
values[i] = strings.Replace(values[i], ",", "，", -1) // replace comma with unicode wide version
}
if state != "" {
values = append(values[:len(values):len(values)], state)
}
return strings.Join(values, ",")
}
type configOAuthKeyType struct{}
// OAuth key for config
var configOAuthKey = configOAuthKeyType{}
// ConfigOAuthOnly marks the ctx so that the Config will stop after
// finding an OAuth
func ConfigOAuthOnly(ctx context.Context) context.Context {
return context.WithValue(ctx, configOAuthKey, struct{}{})
}
// Return true if ctx is marked as ConfigOAuthOnly
func isConfigOAuthOnly(ctx context.Context) bool {
return ctx.Value(configOAuthKey) != nil
}
// StatePop pops a state from the front of the config string
// It returns the new state and the value popped
func StatePop(state string) (newState string, value string) {
comma := strings.IndexRune(state, ',')
if comma < 0 {
return "", state
}
value, newState = state[:comma], state[comma+1:]
value = strings.Replace(value, "，", ",", -1) // replace unicode wide comma with comma
return newState, value
}
// BackendConfig calls the config for the backend in ri
//
// It wraps any OAuth transactions as necessary so only straight
// forward config questions are emitted
func BackendConfig(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, choices configmap.Getter, in ConfigIn) (out *ConfigOut, err error) {
for {
out, err = backendConfigStep(ctx, name, m, ri, choices, in)
if err != nil {
break
}
if out == nil || out.State == "" {
// finished
break
}
if out.Option != nil {
// question to ask user
break
}
if out.Error != "" {
// error to show user
break
}
// non terminal state, but no question to ask or error to show - loop here
in = ConfigIn{
State: out.State,
Result: out.Result,
}
}
return out, err
}
// ConfigAll should be passed in as the initial state to run the
// entire config
const ConfigAll = "*all"
// Run the config state machine for the normal config
func configAll(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, in ConfigIn) (out *ConfigOut, err error) {
if len(ri.Options) == 0 {
return ConfigGoto("*postconfig")
}
// States are encoded
//
// *all-ACTION,NUMBER,ADVANCED
//
// Where NUMBER is the current state, ADVANCED is a flag true or false
// to say whether we are asking about advanced config and
// ACTION is what the state should be doing next.
stateParams, state := StatePop(in.State)
stateParams, stateNumber := StatePop(stateParams)
_, stateAdvanced := StatePop(stateParams)
optionNumber := 0
advanced := stateAdvanced == "true"
if stateNumber != "" {
optionNumber, err = strconv.Atoi(stateNumber)
if err != nil {
return nil, errors.Wrap(err, "internal error: bad state number")
}
}
// Detect if reached the end of the questions
if optionNumber == len(ri.Options) {
if ri.Options.HasAdvanced() {
return ConfigConfirm("*all-advanced", false, "config_fs_advanced", "Edit advanced config?")
}
return ConfigGoto("*postconfig")
} else if optionNumber < 0 || optionNumber > len(ri.Options) {
return nil, errors.New("internal error: option out of range")
}
// Make the next state
newState := func(state string, i int, advanced bool) string {
return StatePush("", state, fmt.Sprint(i), fmt.Sprint(advanced))
}
// Find the current option
option := &ri.Options[optionNumber]
switch state {
case "*all":
// If option is hidden or doesn't match advanced setting then skip it
if option.Hide&OptionHideConfigurator != 0 || option.Advanced != advanced {
return ConfigGoto(newState("*all", optionNumber+1, advanced))
}
// Skip this question if it isn't the correct provider
provider, _ := m.Get(ConfigProvider)
if !MatchProvider(option.Provider, provider) {
return ConfigGoto(newState("*all", optionNumber+1, advanced))
}
out = &ConfigOut{
State: newState("*all-set", optionNumber, advanced),
Option: option,
}
// Filter examples by provider if necessary
if provider != "" && len(option.Examples) > 0 {
optionCopy := option.Copy()
optionCopy.Examples = OptionExamples{}
for _, example := range option.Examples {
if MatchProvider(example.Provider, provider) {
optionCopy.Examples = append(optionCopy.Examples, example)
}
}
out.Option = optionCopy
}
return out, nil
case "*all-set":
// Set the value if not different to current
// Note this won't set blank values in the config file
// if the default is blank
currentValue, _ := m.Get(option.Name)
if currentValue != in.Result {
m.Set(option.Name, in.Result)
}
// Find the next question
return ConfigGoto(newState("*all", optionNumber+1, advanced))
case "*all-advanced":
// Reply to edit advanced question
if in.Result == "true" {
return ConfigGoto(newState("*all", 0, true))
}
return ConfigGoto("*postconfig")
}
return nil, errors.Errorf("internal error: bad state %q", state)
}
func backendConfigStep(ctx context.Context, name string, m configmap.Mapper, ri *RegInfo, choices configmap.Getter, in ConfigIn) (out *ConfigOut, err error) {
ci := GetConfig(ctx)
Debugf(name, "config in: state=%q, result=%q", in.State, in.Result)
defer func() {
Debugf(name, "config out: out=%+v, err=%v", out, err)
}()
switch {
case strings.HasPrefix(in.State, ConfigAll):
// Do all config
out, err = configAll(ctx, name, m, ri, in)
case strings.HasPrefix(in.State, "*oauth"):
// Do internal oauth states
out, err = ConfigOAuth(ctx, name, m, ri, in)
case strings.HasPrefix(in.State, "*postconfig"):
// Do the post config starting from state ""
in.State = ""
return backendConfigStep(ctx, name, m, ri, choices, in)
case strings.HasPrefix(in.State, "*"):
err = errors.Errorf("unknown internal state %q", in.State)
default:
// Otherwise pass to backend
if ri.Config == nil {
return nil, nil
}
out, err = ri.Config(ctx, name, m, in)
}
if err != nil {
return nil, err
}
switch {
case out == nil:
case out.OAuth != nil:
// If this is an OAuth state then deal with it here
returnState := out.State
// If rclone authorize, stop after doing oauth
if isConfigOAuthOnly(ctx) {
Debugf(nil, "OAuth only is set - overriding return state")
returnState = ""
}
// Run internal state, saving the input so we can recall the state
return ConfigGoto(StatePush("", "*oauth", returnState, in.State, in.Result))
case out.Option != nil:
if out.Option.Name == "" {
return nil, errors.New("internal error: no name set in Option")
}
// If override value is set in the choices then use that
if result, ok := choices.Get(out.Option.Name); ok {
Debugf(nil, "Override value found, choosing value %q for state %q", result, out.State)
return ConfigResult(out.State, result)
}
// If AutoConfirm is set, choose the default value
if ci.AutoConfirm {
result := fmt.Sprint(out.Option.Default)
Debugf(nil, "Auto confirm is set, choosing default %q for state %q, override by setting config parameter %q", result, out.State, out.Option.Name)
return ConfigResult(out.State, result)
}
// If fs.ConfigEdit is set then make the default value
// in the config the current value.
if result, ok := choices.Get(ConfigEdit); ok && result == "true" {
if value, ok := m.Get(out.Option.Name); ok {
newOption := out.Option.Copy()
oldValue := newOption.Value
err = newOption.Set(value)
if err != nil {
Errorf(nil, "Failed to set %q from %q - using default: %v", out.Option.Name, value, err)
} else {
newOption.Default = newOption.Value
newOption.Value = oldValue
out.Option = newOption
}
}
}
}
return out, nil
}
// MatchProvider returns true if provider matches the providerConfig string.
//
// The providerConfig string can either be a list of providers to
// match, or if it starts with "!" it will be a list of providers not
// to match.
//
// If either providerConfig or provider is blank then it will return true
func MatchProvider(providerConfig, provider string) bool {
if providerConfig == "" || provider == "" {
return true
}
negate := false
if strings.HasPrefix(providerConfig, "!") {
providerConfig = providerConfig[1:]
negate = true
}
providers := strings.Split(providerConfig, ",")
matched := false
for _, p := range providers {
if p == provider {
matched = true
break
}
}
if negate {
return !matched
}
return matched
}

Some files were not shown because too many files have changed in this diff.