mirror of https://github.com/rclone/rclone.git

Compare commits


85 Commits

Author SHA1 Message Date
Nick Craig-Wood
cebc3d7cf0 vendor: patch fuse to try to fix #3697 - FIXME DO NOT MERGE VENDOR PATCH 2019-12-10 12:08:48 +00:00
Nick Craig-Wood
50bb9b7bdd check: fix --one-way recursing more directories than it needs to
Before this change rclone traversed all directories in the destination.

After this change rclone doesn't traverse directories in the
destination that don't exist in the source if the `--one-way` flag is
set.

See: https://forum.rclone.org/t/check-with-one-way-flag-should-not-traverses-all-destination-directories/13263
2019-12-07 13:26:55 +00:00
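
For example (paths illustrative), a one-way check now only descends into
destination directories that also exist in the source:

    rclone check /path/to/src remote:dst --one-way
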
Nick Craig-Wood
4537d9b5cf operations: make reopen code error on NoLowLevelRetry errors - fixes #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
684dbe0e9d local: make source file being updated errors be NoLowLevelRetry errors #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
572c1079a5 fserrors: Make a new NoLowLevelRetry error and don't retry them #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
cb97239a60 build: pin actions/checkout to v1 to fix build failure 2019-12-04 13:48:03 +00:00
Nick Craig-Wood
e48145f959 Add David Cole to contributors 2019-12-04 12:14:30 +00:00
Nick Craig-Wood
2150cf7362 Add email for Aleksandar Janković 2019-12-04 12:14:21 +00:00
David Cole
707e51eac7 docs: correct typo in gui docs 2019-12-04 12:08:52 +00:00
Nick Craig-Wood
0d10640aaa s3: add --s3-copy-cutoff for size to switch to multipart copy
Before this change we used the same (relatively low) limits for server
side copy as we did for multipart uploads.  It doesn't make sense to
use the same limits since no data is being downloaded or uploaded in
a server side copy.

This change introduces a new parameter --s3-copy-cutoff to control
when the switch from single part to multipart server side copy happens
and defaults it to the maximum, 5GB.

This makes server side copies much more efficient.

It also fixes the spurious error when trying to set the modification
time of a file bigger than 5GB.

See #3778
2019-12-03 10:37:55 +00:00
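
An illustrative invocation (bucket and object names hypothetical); the
new flag takes the usual size suffixes:

    rclone copyto --s3-copy-cutoff 1G s3:bucket/huge.bin s3:bucket/huge-copy.bin
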
Nick Craig-Wood
f4746f5064 s3: fix multipart copy - fixes #3778
Before this change multipart copies were giving the error

    Range specified is not valid for source object of size

This was due to an off by one error in the range source introduced in
7b1274e29a "s3: support for multipart copy"
2019-12-03 10:37:55 +00:00
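
HTTP/S3 byte ranges are inclusive at both ends, so the last byte of an
N byte object is N-1. A minimal runnable sketch of the corrected
arithmetic (simplified from the calculateRange change in the diff below):

    package main

    import "fmt"

    // rangeForLastPart returns the inclusive byte range of the final part.
    // For a 100 byte object the valid last byte is 99, not 100.
    func rangeForLastPart(start, totalSize int64) string {
        return fmt.Sprintf("bytes=%d-%d", start, totalSize-1)
    }

    func main() {
        fmt.Println(rangeForLastPart(50, 100)) // bytes=50-99
    }
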
Aleksandar Janković
c05bb63f96 s3: fix DisableChecksum condition 2019-12-02 15:15:59 +00:00
Danil Semelenov
e2773b3b4e Fix completion with an encrypted config
Closes #3767.
2019-11-29 14:48:12 +00:00
Nick Craig-Wood
d3b0bed091 drive: make sure invalid auth for teamdrives always reports an error
For some reason Google doesn't return an error if you use a service
account with the wrong permissions to list a team drive.  This gives
the user the false impression that the drive is empty.

This change:
- calls teamdrives get on rclone about
- calls teamdrives get on a listing of the root which returned no entries

These will both detect a team drive which has the incorrect auth and
workaround the issue.

Fixes: #3763
See: https://forum.rclone.org/t/rclone-missing-error-code-when-sas-have-no-permission/13086
See: https://forum.rclone.org/t/need-need-bug-verification-rclone-about-doesnt-work-on-teamdrives-empty-output/13105
2019-11-28 10:51:17 +00:00
Nick Craig-Wood
33c80bbb96 jottacloud: add URL to generate Login Token to config wizard 2019-11-28 10:03:48 +00:00
Nick Craig-Wood
705e4694ed webdav: fix case of "Bearer" in Authorization: header to agree with RFC
Before this change rclone used "Authorization: BEARER token".  However,
according to the RFC this should be "Bearer"

https://tools.ietf.org/html/rfc6750#section-2.1

This changes it to "Authorization: Bearer token"

Fixes #3751 and interop with Salesforce Webdav server
2019-11-27 12:04:31 +00:00
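
For reference, the header form from RFC 6750 (token value taken from the
RFC's own example):

    Authorization: Bearer mF_9.B5f-4.1JqM
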
Nick Craig-Wood
4fbc90d115 webdav: make nextcloud only upload SHA1 checksums
When using nextcloud, before this change we only uploaded one of SHA1
or MD5 checksum in the OC-Checksum header with preference to SHA1 if
both were set.

This made the MD5 checksums read back as empty strings, which made
syncing with checksums less useful than it should be as all the MD5
checksums were blank.

This change makes it so that we only upload the SHA1 to nextcloud.

The behaviour of owncloud is unchanged as owncloud uses the checksum
as an upload integrity check only and calculates its own checksums.

See: https://forum.rclone.org/t/how-to-specify-hash-method-to-checksum/13055
2019-11-27 11:58:55 +00:00
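
An illustrative upload header after this change (the hash shown is the
SHA-1 of the empty string):

    OC-Checksum: SHA1:da39a3ee5e6b4b0d3255bfef95601890afd80709
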
Nick Craig-Wood
ed39adc65b Add Fernando to contributors 2019-11-27 11:40:44 +00:00
Fernando
162fdfe455 mount: document remotes as network shares on Windows
Provided instructions for mounting remotes as network shares/network drives in a Windows environment
2019-11-27 11:40:24 +00:00
buengese
8f33c932f2 jottacloud: update docs for new auth method 2019-11-26 13:49:49 +00:00
buengese
4195bd7880 jottacloud: use new auth method used by official client 2019-11-26 13:49:49 +00:00
Marco Molteni
d72f3e31c0 docs/install: explain how to workaround macOS Gatekeeper requiring notarization
Fix #3689
2019-11-26 12:33:30 +00:00
Garry McNulty
11f44cff50 drive: add --drive-use-shared-date to use date file was shared instead of modified date - fixes #3624 2019-11-26 12:19:44 +00:00
SezalAgrawal
c3751e9a50 operations: fix dedupe continuing on errors like insufficientFilePermisson - fixes #3470
* Fix dedupe on merge continuing on errors like insufficientFilePermisson
* Sorted the directories to remove recursion logic
2019-11-26 10:58:52 +00:00
Nick Craig-Wood
420ae905b5 vfs: make sure existing files opened for write show correct size
Before this change if an existing file was opened for write without
truncate its size would show as 0 rather than the full size of the
file.
2019-11-25 11:31:44 +00:00
Nick Craig-Wood
a7d65bd519 sftp: add --sftp-skip-links to skip symlinks and non regular files - fixes #3716
This also corrects the symlink detection logic to only check symlink
files.  Previously it checked all directories too, which made it do
more stat calls than necessary.
2019-11-24 16:10:53 +00:00
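
Example use (remote name hypothetical):

    rclone lsl --sftp-skip-links mysftp:
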
Nick Craig-Wood
1db31d7149 swift: fix parsing of X-Object-Manifest
Before this change we forgot to URL decode the X-Object-Manifest in a dynamic large object.

This problem was introduced by 2fe8285f89 "swift: reserve
segments of dynamic large object when delete objects in container what
was enabled versioning."
2019-11-21 13:25:02 +00:00
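
Illustratively (manifest value hypothetical), a header such as

    X-Object-Manifest: bucket%5Fsegments/backups/big.bin/

must be URL decoded to bucket_segments/backups/big.bin/ before it is
split into the segment container and the object prefix.
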
Nick Craig-Wood
4641bd5116 Add anuar45 to contributors 2019-11-21 11:16:04 +00:00
anuar45
7e602dbf39 stats: show deletes in stats and hide zero stats
This shows deletes in the stats.  It also hides zero stats so the
stats block doesn't get too long.
2019-11-21 11:15:47 +00:00
Nick Craig-Wood
e14d968f8d Start v1.50.2-DEV development 2019-11-19 16:51:32 +00:00
Nick Craig-Wood
e0eeeaafcd accounting: don't show entries in both transferring and checking
See: https://forum.rclone.org/t/showing-progress-checking/12958
2019-11-19 13:22:33 +00:00
Nick Craig-Wood
d46f8d0ae5 accounting: fix memory leak on retries operations
Before this change if an operation was retried on operations.Copy and
the operation was large enough to use an async buffer then an async
buffer was leaked on the retry.  This leaked memory, a file handle and
a goroutine.

After this change if Account.WithBuffer is called and there is already
a buffer, then a new one won't be allocated.
2019-11-19 12:11:59 +00:00
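
A minimal sketch of the guard (the Account fields and asyncBuffer type
are assumptions for illustration, not rclone's exact code):

    type asyncBuffer struct{ /* buffered reader state elided */ }

    func newAsyncBuffer() *asyncBuffer { return &asyncBuffer{} }

    type Account struct {
        mu  sync.Mutex
        buf *asyncBuffer
    }

    // WithBuffer attaches an async buffer unless one is already present,
    // so a retried copy reuses the existing buffer instead of leaking it.
    func (acc *Account) WithBuffer() *Account {
        acc.mu.Lock()
        defer acc.mu.Unlock()
        if acc.buf == nil {
            acc.buf = newAsyncBuffer()
        }
        return acc
    }
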
Nick Craig-Wood
1e6278556c Add Maciej Zimnoch to contributors 2019-11-18 16:28:19 +00:00
Nick Craig-Wood
303f4ee152 Add Ankur Gupta to contributors 2019-11-18 16:28:19 +00:00
Nguyễn Hữu Luân
2fe8285f89 swift: preserve segments of dynamic large objects when deleting objects in a container that has versioning enabled.
Also handle moving an object when its container has versioning enabled via "X-History-Location".
2019-11-18 16:26:10 +00:00
Maciej Zimnoch
f5443ac939 accounting: clear finished transfer in stats-reset
In order to reduce memory usage `stats-reset` also
clears finished transfers.

Fixes #3734
2019-11-18 14:25:32 +00:00
Maciej Zimnoch
7cf056b2c2 accounting: allow MaxCompletedTransfers to be configurable
Users of the rclone library might be interested in changing the
default value or even disabling the limit. With the current version
that is impossible, which leads to races when the number of uploaded
objects exceeds the default limit.

Fixes #3732
2019-11-18 14:25:32 +00:00
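
Library users can now tune this, for example (assuming the exported
variable this commit introduces):

    import "github.com/rclone/rclone/fs/accounting"

    func init() {
        // keep more finished transfers in the stats; illustrative value
        accounting.MaxCompletedTransfers = 500
    }
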
Ankur Gupta
75a6c49f87 Fix error counter - fixes #3650
For a few commands, rclone counts an error multiple times. This was fixed by
creating a new error type which keeps a flag to remember whether the error has
already been counted. The CountError function now wraps the original
error with the above new error type and returns it.
2019-11-18 14:13:02 +00:00
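
A simplified sketch of the pattern (types are hypothetical, not rclone's
exact code):

    // countedError marks an error as already counted so that nested
    // calls don't increment the error counter twice.
    type countedError struct{ error }

    type stats struct{ errors int }

    func (s *stats) countError(err error) error {
        if err == nil {
            return nil
        }
        if _, done := err.(countedError); done {
            return err // already counted once
        }
        s.errors++
        return countedError{err}
    }
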
Nick Craig-Wood
19229b1215 drive: fix --drive-root-folder-id with team/shared drives
Before this change rclone used the team_drive ID as the root if set
even if the root_folder_id was set too.

This change uses the root_folder_id in preference over the team_drive
which restores the functionality.

This problem was introduced by ba7c2ac443

Fixes #3742
2019-11-16 18:38:21 +00:00
Nick Craig-Wood
b5bb4c2a21 vfs: fix tests not to upload a 0 length file
Some remotes can't upload 0 length files, so this fixes the
TestCacheRename test so that it writes something to the file.
2019-11-15 09:26:40 +00:00
Nick Craig-Wood
479c803fd9 vendor: update all dependencies 2019-11-14 21:51:34 +00:00
Nick Craig-Wood
3dcf1e61cf cache: follow move of upstream library github.com/coreos/bbolt github.com/etcd-io/bbolt 2019-11-14 21:51:34 +00:00
Nick Craig-Wood
3da1cbfc81 Add Marco Molteni to contributors 2019-11-14 21:51:34 +00:00
Marco Molteni
0c9a8cf776 doc: add Scaleway to the S3 table of contents
Hello, documentation for Scaleway was already there, but the TOC was missing it.
2019-11-14 21:49:43 +00:00
Nick Craig-Wood
f3871377c3 Add Sebastian Brandt to contributors 2019-11-14 12:54:42 +00:00
Nick Craig-Wood
cc9a7dc073 Add Barry Muldrey to contributors 2019-11-14 12:54:42 +00:00
Nick Craig-Wood
b61dd809ee Add new email for Anagh Kumar Baranwal 2019-11-14 12:54:38 +00:00
Sebastian Brandt
f158a398f3 sftp: Retry Creation of Connection - fixes #3656
Removes the existing rate limiter because it is implicitly included in
the pacer.
2019-11-14 12:50:01 +00:00
jaKa
acefa5c40d koofr: use rclone HTTP client. 2019-11-14 11:36:44 +00:00
Barry Muldrey
2784c3234b fs/config/configflags: fix --compare-dest and --copy-dest help strings
from rsync manual:

--compare-dest=DIR
    This option instructs rsync to use DIR on the destination machine as an
    additional hierarchy to compare destination files against doing transfers
    (if the files are missing in the destination directory). If a file is found
    in DIR that is identical to the sender's file, the file will NOT be
    transferred to the destination directory. This is useful for creating
    a sparse backup of just files that have changed from an earlier backup.

--copy-dest=DIR
    This option behaves like --compare-dest, but rsync will also copy unchanged
    files found in DIR to the destination directory using a local copy.
    This is useful for doing transfers to a new destination while leaving
    existing files intact, and then doing a flash-cutover when all files
    have been successfully transferred.
2019-11-12 13:37:58 +00:00
Nick Craig-Wood
c21a4fee58 mount,cmount: make sure we call unmount when exiting 2019-11-11 22:08:52 +00:00
Nick Craig-Wood
358f5a8084 vfs: fix edge cases when reading ModTime from file
This fixes the unreliable test TestMount/CacheMode=full/TestFileModTime
2019-11-11 16:20:28 +00:00
Nick Craig-Wood
9115752679 proxy: reduce the internal bcrypt strength to fix race tests
Before this change the race tests were taking too long.  The bcrypt
function went from about 20ms to 1s under the race detector and this
is called for every transaction on webdav.

This change reduces the bcrypt strength so it takes about 1ms without
the race detector, which lets the race tests pass while still giving
adequate security for in-memory-only storage.
2019-11-11 16:20:28 +00:00
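
With golang.org/x/crypto/bcrypt the strength is the cost parameter, and
each +1 roughly doubles the work; dropping from the default cost towards
the minimum gives the millisecond-level hashing described above
(illustrative):

    import "golang.org/x/crypto/bcrypt"

    // bcrypt.MinCost is 4 (~1ms); bcrypt.DefaultCost is 10 (~60ms+)
    hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost)
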
Nick Craig-Wood
51efb349ac vfs: revise locking in file and dir to fix race conditions 2019-11-11 16:20:27 +00:00
Nick Craig-Wood
e0d9314059 mounttest: fix occasionally failing test TestRenameOpenHandle 2019-11-11 16:20:27 +00:00
Nick Craig-Wood
21c6babdbb mount: enable async reads for a 20% speedup
Now that the vfs can cope with out of order reads we can enable the
async read feature for an increase in throughput on the local disk of
about 20%.
2019-11-11 16:20:27 +00:00
Nick Craig-Wood
5beeac7959 vfs: make ReadAt for non cached files work better with non-sequential reads
This makes ReadAt for non cached files wait a short time (up to 5ms)
if it gets an out of order read (which would normally cause a seek,
and seeks take a long time) to see if the gap will be filled with an
in order read.

This makes mount2 based on go-fuse work more efficiently and enables
async reading in normal mount.

A similar change was done for WriteAt in af030f74f5
2019-11-11 16:20:27 +00:00
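
A simplified sketch of the idea (the file type and its pos/seek/read
helpers are hypothetical; rclone's real implementation synchronises
differently):

    // readAtSketch waits up to 5ms for sequential reads to catch up to
    // off before paying for an expensive seek.
    func (f *file) readAtSketch(p []byte, off int64) (int, error) {
        deadline := time.Now().Add(5 * time.Millisecond)
        for f.pos() != off && time.Now().Before(deadline) {
            time.Sleep(100 * time.Microsecond) // let an in-order read fill the gap
        }
        if f.pos() != off {
            if err := f.seek(off); err != nil {
                return 0, err
            }
        }
        return f.read(p)
    }
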
Nick Craig-Wood
be5392f448 vfs: only calculate one hash for reads
This speeds up mounting on the local backend enormously.
2019-11-11 16:20:27 +00:00
Nick Craig-Wood
c00dcb7e67 chunkedreader: disable hash calculation for first segment
This will produce a slight speedup for small files.
2019-11-11 16:20:27 +00:00
Nick Craig-Wood
6150ae89d6 vfs: add a newly created file straight into the directory 2019-11-11 15:20:09 +00:00
Nick Craig-Wood
1e423d21e1 drive: fix listing of the root directory with drive.files scope
We attempt to find the ID of the root folder by doing a GET on the
folder ID "root". With scope "drive.files" this fails with a 404
message.

After this change if we get the 404 message, we just carry on using
"root" as the root folder ID and we cache that for future lookups.

This means that changenotify messages will not work correctly in the
root folder but otherwise has minor consequences.

See: https://forum.rclone.org/t/fresh-raspberry-pi-build-google-drive-404-error-failed-to-ls-googleapi-error-404-file-not-found/12791
2019-11-11 09:07:34 +00:00
Brett Dutro
53d55ae760 Add test for cache renaming functionality 2019-11-10 11:58:46 +00:00
Anagh Kumar Baranwal
5928704e1b On rename, rename in cache too if the file exists
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2019-11-10 11:58:46 +00:00
buengese
5ddfa9f7f6 config: SetValueAndSave ignore error if config section does not exist yet 2019-11-09 16:44:08 +00:00
Nick Craig-Wood
9b5308144f s3: Reduce memory usage streaming files by reducing max stream upload size
Before this change rclone would allow the user to stream files of up
to 5TB (eg with rclone mount, rclone rcat or when uploading google
photos or docs).  This meant that rclone allocated 4 * 525 MB buffers
per transfer, which is far too much memory by default.

This change makes rclone use the configured chunk size for streamed
uploads.  This is 5MB by default which means that rclone can stream
upload files up to 48GB by default staying below the 10,000 chunks
limit.

This can be increased with --s3-chunk-size if necessary.

If rclone detects that a file is being streamed to s3 it will make a
single NOTICE level log stating the limitation.

This fixes the enormous memory usage.

Fixes #3568
See: https://forum.rclone.org/t/how-much-memory-does-rclone-need/12743
2019-11-09 15:55:19 +00:00
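
The arithmetic behind both figures:

    before: 5 TiB max file / 10,000 parts ≈ 525 MiB per chunk; 4 buffered chunks ≈ 2.1 GB per transfer
    after:  5 MiB default chunk × 10,000 parts ≈ 48.8 GiB (the "48GB" limit) per streamed upload
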
Aleksandar Jankovic
4b20afa94a backend/s3: fix ExpiryWindow value
ExpiryWindow accepts a duration, but it was set to the bare value 3.
This changes it to 3 * time.Minute since the default is 5 minutes.
2019-11-05 13:55:55 +00:00
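
Since ExpiryWindow is a time.Duration (nanoseconds), the bare constant
was effectively no window at all:

    ExpiryWindow: 3,               // 3 nanoseconds - credentials refresh only 3ns before expiry
    ExpiryWindow: 3 * time.Minute, // intended: refresh 3 minutes before expiry
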
Nick Craig-Wood
049ff1f269 config: check a remote exists when creating a new one 2019-11-05 12:39:33 +00:00
Nick Craig-Wood
3f7af64316 config: give config questions default values - fixes #3672 2019-11-05 11:53:44 +00:00
Nick Craig-Wood
0eaf5475ef Start v1.50.1-DEV development 2019-11-02 15:26:01 +00:00
Nick Craig-Wood
7bf056316f local: fix listings of . on Windows - fixes #3676 2019-10-30 16:00:18 +00:00
Xiaoxing Ye
520ddbcceb config: do not open browser on headless if google fs
On google fs (drive, google photos, and google cloud storage), if
headless is selected, do not open browser.

This also supplies a new option "auth-no-open-browser" for rclone
authorize if the user does not want the browser opened.

This should fix #3323.
2019-10-30 14:12:42 +00:00
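
Example (remote type illustrative):

    rclone authorize "drive" --auth-no-open-browser
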
Nick Craig-Wood
1ce1ea34aa hash: fix hash names for DropboxHash and CRC-32
These were unintentionally renamed as part of 1dc8bcd48c

Fixes #3679
2019-10-30 12:20:10 +00:00
Nick Craig-Wood
e6378daadf fshttp: don't print token bucket errors on context cancelled
These happen as a natural part of exceeding --max-transfer and we
don't need to worry the user with them.
2019-10-30 12:20:10 +00:00
Nick Craig-Wood
7ff95c6250 Add Xiaoxing Ye to contributors 2019-10-30 12:20:10 +00:00
Xiaoxing Ye
6d58d9a86f vendor: change goftp/server url
Closing #3674
2019-10-29 17:41:56 +00:00
Chaitanya
e0356f5aae rcd: Adding group parameter to stats 2019-10-29 16:39:37 +00:00
Xiaoxing Ye
191cfb79d1 onedrive: no trailing slash reading metadata...
Don't append a trailing slash when reading the metadata of an item
given its item ID.

This should fix #3664.
2019-10-29 13:33:11 +00:00
Nick Craig-Wood
e81eca4055 fshttp: fix error reporting on tpslimit token bucket errors 2019-10-28 22:11:38 +00:00
Nick Craig-Wood
ee3215ac76 build: make replacement of new rclone binary atomic on build
This avoids the "text file busy" message when trying to replace the
binary of a running rclone.
2019-10-28 22:11:38 +00:00
Nick Craig-Wood
199ac61bde rc: add methods to turn on blocking and mutex profiling 2019-10-28 22:11:38 +00:00
Nick Craig-Wood
a40cc1167d Add zero-24 to contributors 2019-10-28 16:49:33 +00:00
zero-24
c57ea8d867 docs: add instructions to create your own dropbox app ID 2019-10-28 16:49:16 +00:00
Nick Craig-Wood
1868c77e16 rc: fix formatting of docs 2019-10-27 10:43:40 +00:00
Brett Dutro
378a3f4133 mount: replace use of WriteAt with Write for cache mode >= writes and O_APPEND
os.File.WriteAt returns an error if a file was opened with O_APPEND.
This replaces it with os.File.Write if the file was opened with
O_APPEND.
2019-10-26 17:27:52 +01:00
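
The underlying stdlib behaviour, runnable standalone:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        f, err := os.OpenFile("demo.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
        if err != nil {
            panic(err)
        }
        defer f.Close()
        _, err = f.WriteAt([]byte("x"), 0)
        fmt.Println(err) // os: invalid use of WriteAt on file opened with O_APPEND
    }
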
Nick Craig-Wood
daff5a824e Start v1.50.0-DEV development 2019-10-26 12:42:06 +01:00
578 changed files with 26644 additions and 8660 deletions

View File

@@ -102,7 +102,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@master
uses: actions/checkout@v1
with:
path: ./src/github.com/${{ github.repository }}
@@ -211,7 +211,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@master
uses: actions/checkout@v1
with:
path: ./src/github.com/${{ github.repository }}

View File

@@ -46,7 +46,8 @@ endif
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all

View File

@@ -89,7 +89,7 @@ Now
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* make LAST_TAG=${NEW_TAG} startdev
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend

View File

@@ -1 +1 @@
v1.50.0
v1.50.2

View File

@@ -16,7 +16,7 @@ import (
"sync"
"time"
bolt "github.com/coreos/bbolt"
bolt "github.com/etcd-io/bbolt"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"

View File

@@ -63,6 +63,7 @@ func init() {
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",

View File

@@ -326,6 +326,17 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
Advanced: true,
}, {
Name: "use_shared_date",
Default: false,
Help: `Use date file was shared instead of modified date.
Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.
If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
@@ -463,6 +474,7 @@ type Options struct {
ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"`
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
@@ -694,6 +706,9 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
@@ -830,7 +845,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
} else {
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm() {
if !config.Confirm(false) {
return nil
}
client, err := createOAuthClient(opt, name, m)
@@ -1021,16 +1036,22 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}
// set root folder for a team drive or query the user root folder
if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
} else if opt.RootFolderID != "" {
if opt.RootFolderID != "" {
// override root folder if set or cached in the config
f.rootFolderID = opt.RootFolderID
} else if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
} else {
// Look up the root ID and cache it in the config
rootID, err := f.getRootID()
if err != nil {
return nil, err
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
} else {
return nil, err
}
}
f.rootFolderID = rootID
m.Set("root_folder_id", rootID)
@@ -1089,6 +1110,8 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime
} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
modifiedDate = info.SharedWithMeTime
}
size := info.Size
if f.opt.SizeAsQuota {
@@ -1457,6 +1480,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if iErr != nil {
return nil, iErr
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
}
return entries, nil
}
@@ -1594,6 +1625,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0
cb := func(entry fs.DirEntry) error {
mu.Lock()
@@ -1606,6 +1638,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
listed++
return list.Add(entry)
}
@@ -1662,7 +1695,21 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return err
}
return list.Flush()
err = list.Flush()
if err != nil {
return err
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
}
}
return nil
}
// itemToDirEntry converts a drive.File to a fs.DirEntry.
@@ -2035,9 +2082,30 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return nil
}
// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
if !f.isTeamDrive {
return nil
}
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Team/Shared Drive info")
}
fs.Debugf(f, "read info from team drive %q", td.Name)
return err
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive {
err := f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
// Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil
}

View File

@@ -113,7 +113,7 @@ var (
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("Dropbox", 64, dbhash.New)
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",

View File

@@ -46,13 +46,26 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
}
// JSON structures returned by new API

View File

@@ -4,12 +4,13 @@ import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
@@ -25,7 +26,6 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
@@ -41,29 +41,25 @@ const enc = encodings.JottaCloud
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configVersion = 1
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
ClientID: "jottacli",
Endpoint: oauth2.Endpoint{
AuthURL: tokenURL,
TokenURL: tokenURL,
@@ -81,43 +77,39 @@ func init() {
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return
}
}
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
deviceRegistration, err := registerDevice(ctx, srv)
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
log.Fatalf("Failed to parse config version - corrupted config")
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
refresh = ver != configVersion
} else {
refresh = true
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
clientConfig := *fs.Config
clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
srv := rest.NewClient(fshttp.NewClient(&clientConfig))
token, err := doAuth(ctx, srv, username, password)
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
token, err := doAuth(ctx, srv, loginToken)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
@@ -127,7 +119,7 @@ func init() {
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm() {
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -143,6 +135,8 @@ func init() {
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
},
Options: []fs.Option{{
Name: "md5_memory_limit",
@@ -249,67 +243,51 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
values := url.Values{}
values.Set("device_id", randomDeviceName)
opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}
var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
}
var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}
// we don't seem to need any data from this link but the API is not happy if skip it
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
NoResponse: true,
}
_, err = srv.Call(ctx, &opts)
if err != nil {
return token, err
}
// prepare out token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
Body: strings.NewReader(values.Encode()),
}
// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
return token, err
}
token.AccessToken = jsonToken.AccessToken
@@ -471,29 +449,6 @@ func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}
// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
_ = req.Body.Close()
// make the refresh token upper case
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
@@ -504,30 +459,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
var ok bool
var version string
if version, ok = m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = ver == configVersion
}
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")

View File

@@ -16,6 +16,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
httpclient "github.com/koofr/go-httpclient"
@@ -259,7 +260,9 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if err != nil {
return nil, err
}
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(fs.Config)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)

View File

@@ -350,7 +350,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = errors.Wrapf(err, "failed to open directory %q", dir)
fs.Errorf(dir, "%v", err)
if isPerm {
accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
err = nil // ignore error but fail sync
}
return nil, err
@@ -386,7 +386,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
}
fis = append(fis, fi)
@@ -409,7 +409,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
accounting.Stats(ctx).Error(err)
err = accounting.Stats(ctx).Error(err)
continue
}
if err != nil {
@@ -820,10 +820,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
}
}
@@ -1084,17 +1084,17 @@ func (o *Object) Remove(ctx context.Context) error {
func cleanRootPath(s string, noUNC bool) string {
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = uncPath(s)

View File

@@ -54,7 +54,7 @@ var testsWindows = [][2]string{
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`c:/temp`, `c:\temp`},
{`/temp/file.txt`, `\temp\file.txt`},
{`C:/temp/file.txt`, `C:\temp\file.txt`},
{`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\#¤%&\()=;^+-`},
{`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\&\&\&`},
}

View File

@@ -351,8 +351,13 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath))))
if relPath != "" {
relPath = "/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath)))
}
opts := newOptsCall(normalizedID, "GET", ":"+relPath)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)

View File

@@ -269,7 +269,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
cf.Protocol = protocol
cf.Host = host
cf.Port = port
cf.ConnectionRetries = opt.ConnectionRetries
// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
return qs.Init(cf)

View File

@@ -26,6 +26,7 @@ import (
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -693,16 +694,37 @@ The minimum is 0 and the maximum is 5GB.`,
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
When uploading files larger than upload_cutoff or files with unknown
size (eg from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
@@ -771,12 +793,11 @@ WARNING: Storing parts of an incomplete multipart upload counts towards space us
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
@@ -798,6 +819,7 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
@@ -961,7 +983,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
Client: ec2metadata.New(session.New(), &aws.Config{
HTTPClient: lowTimeoutClient,
}),
ExpiryWindow: 3,
ExpiryWindow: 3 * time.Minute,
},
}
cred := credentials.NewChainCredentials(providers)
@@ -1642,7 +1664,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.StorageClass = &f.opt.StorageClass
}
if srcSize >= int64(f.opt.UploadCutoff) {
if srcSize >= int64(f.opt.CopyCutoff) {
return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
}
return f.pacer.Call(func() (bool, error) {
@@ -1655,8 +1677,8 @@ func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
start := partIndex * partSize
var ends string
if partIndex == numParts-1 {
if totalSize >= 0 {
ends = strconv.FormatInt(totalSize, 10)
if totalSize >= 1 {
ends = strconv.FormatInt(totalSize-1, 10)
}
} else {
ends = strconv.FormatInt(start+partSize-1, 10)
@@ -1693,7 +1715,7 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
}
}()
partSize := int64(f.opt.ChunkSize)
partSize := int64(f.opt.CopyCutoff)
numParts := (srcSize-1)/partSize + 1
var parts []*s3.CompletedPart
@@ -1921,11 +1943,6 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
if o.bytes >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Can't update metadata here, so return this error to force a recopy
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime
@@ -1982,6 +1999,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, nil
}
var warnStreamUpload sync.Once
// Update the Object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
@@ -2001,10 +2020,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
// 48GB which seems like a not too unreasonable limit.
if size == -1 {
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
warnStreamUpload.Do(func() {
fs.Logf(o.fs, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, fs.SizeSuffix(u.PartSize*s3manager.MaxUploadParts))
})
return
}
// Adjust PartSize until the number of parts is small enough.
@@ -2023,7 +2046,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// read the md5sum if available for non multpart and if
// disable checksum isn't present.
var md5sum string
if !multipart || !o.fs.opt.DisableChecksum {
if !multipart && !o.fs.opt.DisableChecksum {
hash, err := src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)

View File

@@ -29,15 +29,17 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/time/rate"
)
const (
connectionsPerSecond = 10 // don't make more than this many ssh connections/s
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
@@ -154,6 +156,11 @@ Home directory can be found in a shared folder called "home"
Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
Advanced: true,
}, {
Name: "skip_links",
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}},
}
fs.Register(fsi)
@@ -175,6 +182,7 @@ type Options struct {
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
}
// Fs stores the interface to the remote SFTP files
@@ -190,7 +198,7 @@ type Fs struct {
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
connLimit *rate.Limiter // for limiting number of connections per second
pacer *fs.Pacer // pacer for operations
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -270,10 +278,6 @@ func (c *conn) closed() error {
// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) {
// Rate limit rate of new connections
err = f.connLimit.Wait(context.Background())
if err != nil {
return nil, errors.Wrap(err, "limiter failed in connect")
}
c = &conn{
err: make(chan error, 1),
}
@@ -307,7 +311,14 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
if c != nil {
return c, nil
}
return f.sftpConnection()
err = f.pacer.Call(func() (bool, error) {
c, err = f.sftpConnection()
if err != nil {
return true, err
}
return false, nil
})
return c, err
}
// Return an SFTP connection to the pool
@@ -465,7 +476,7 @@ func NewFsWithConnection(ctx context.Context, name string, root string, m config
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -595,12 +606,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
remote := path.Join(dir, info.Name())
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() {
if !info.Mode().IsRegular() && !info.IsDir() {
if f.opt.SkipLinks {
// skip non regular file if SkipLinks is set
continue
}
oldInfo := info
info, err = f.stat(remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
fs.Errorf(remote, "stat of non-regular file failed: %v", err)
}
info = oldInfo
}

View File

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
@@ -530,10 +531,10 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
if prefix != "" {
if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
if directory != "" {
if directory != "" && !strings.HasSuffix(directory, "/") {
directory += "/"
}
// Options for ObjectsWalk
@@ -952,6 +953,18 @@ func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
}
func (o *Object) isInContainerVersioning(container string) (bool, error) {
_, headers, err := o.fs.c.Container(container)
if err != nil {
return false, err
}
xHistoryLocation := headers["X-History-Location"]
if len(xHistoryLocation) > 0 {
return true, nil
}
return false, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
@@ -1083,9 +1096,8 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
segmentsContainer, prefix, err := o.getSegmentsDlo()
err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
@@ -1114,6 +1126,23 @@ func (o *Object) removeSegments(except string) error {
return nil
}
func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
return
}
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
@@ -1300,12 +1329,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove(ctx context.Context) (err error) {
container, containerPath := o.split()
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(container, containerPath)
@@ -1314,12 +1340,22 @@ func (o *Object) Remove(ctx context.Context) error {
if err != nil {
return err
}
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
// ...then segments if required
if isDynamicLargeObject {
err = o.removeSegments("")
isInContainerVersioning, err := o.isInContainerVersioning(container)
if err != nil {
return err
}
if !isInContainerVersioning {
err = o.removeSegments("")
if err != nil {
return err
}
}
}
return nil
}

View File

@@ -113,7 +113,8 @@ type Fs struct {
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
hasChecksums bool // set if can use owncloud style checksums
hasMD5 bool // set if can use owncloud style checksums for MD5
hasSHA1 bool // set if can use owncloud style checksums for SHA1
}
// Object describes a webdav object
@@ -215,7 +216,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
},
NoRedirect: true,
}
if f.hasChecksums {
if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -383,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// sets the BearerToken up
func (f *Fs) setBearerToken(token string) {
f.opt.BearerToken = token
f.srv.SetHeader("Authorization", "BEARER "+token)
f.srv.SetHeader("Authorization", "Bearer "+token)
}
// fetch the bearer token using the command
@@ -430,11 +431,12 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
f.hasMD5 = true
f.hasSHA1 = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
f.hasSHA1 = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -536,7 +538,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
"Depth": depth,
},
}
if f.hasChecksums {
if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -945,10 +947,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if f.hasChecksums {
return hash.NewHashSet(hash.MD5, hash.SHA1)
hashes := hash.Set(hash.None)
if f.hasMD5 {
hashes.Add(hash.MD5)
}
return hash.Set(hash.None)
if f.hasSHA1 {
hashes.Add(hash.SHA1)
}
return hashes
}
// About gets quota information
@@ -1015,13 +1021,11 @@ func (o *Object) Remote() string {
// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.hasChecksums {
switch t {
case hash.SHA1:
return o.sha1, nil
case hash.MD5:
return o.md5, nil
}
if t == hash.MD5 && o.fs.hasMD5 {
return o.md5, nil
}
if t == hash.SHA1 && o.fs.hasSHA1 {
return o.sha1, nil
}
return "", hash.ErrUnsupported
}
@@ -1042,10 +1046,14 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
if o.fs.hasChecksums {
if o.fs.hasMD5 || o.fs.hasSHA1 {
hashes := info.Hashes()
o.sha1 = hashes[hash.SHA1]
o.md5 = hashes[hash.MD5]
if o.fs.hasSHA1 {
o.sha1 = hashes[hash.SHA1]
}
if o.fs.hasMD5 {
o.md5 = hashes[hash.MD5]
}
}
return nil
}
@@ -1126,19 +1134,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
}
if o.fs.useOCMtime || o.fs.hasChecksums {
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
}
if o.fs.hasChecksums {
// Set an upload checksum - prefer SHA1
//
// This is used as an upload integrity test. If we set
// only SHA1 here, owncloud will calculate the MD5 too.
// Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
if o.fs.hasSHA1 {
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
} else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
}
}
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
}
}
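The net effect of the rewritten block is: at most one `OC-Checksum` header is sent, preferring SHA1 where the vendor supports it and falling back to MD5. A minimal sketch, with a hypothetical `chooseOCChecksum` helper and the hash strings assumed precomputed:

```go
package sketch

// chooseOCChecksum returns the OC-Checksum header value, preferring
// SHA1 over MD5, or "" if neither hash is available.
func chooseOCChecksum(hasSHA1, hasMD5 bool, sha1sum, md5sum string) string {
	if hasSHA1 && sha1sum != "" {
		return "SHA1:" + sha1sum
	}
	if hasMD5 && md5sum != "" {
		return "MD5:" + md5sum
	}
	return ""
}
```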

View File

@@ -3,11 +3,18 @@ package authorize
import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/cobra"
)
var (
noAutoBrowser bool
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
}
var commandDefinition = &cobra.Command{
@@ -16,9 +23,12 @@ var commandDefinition = &cobra.Command{
Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.`,
rclone config.
Use the --auth-no-open-browser flag to prevent rclone from opening the auth
link in the default browser automatically.
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 3, command, args)
config.Authorize(args)
config.Authorize(args, noAutoBrowser)
},
}

View File

@@ -82,7 +82,7 @@ func ShowVersion() {
func NewFsFile(remote string) (fs.Fs, string) {
_, _, fsPath, err := fs.ParseRemote(remote)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
f, err := cache.Get(remote)
@@ -92,7 +92,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
case nil:
return f, ""
default:
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return nil, ""
@@ -107,13 +107,13 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) {
if fileName != "" {
if !filter.Active.InActive() {
err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf(err.Error())
}
// Limit transfers to this file
err := filter.Active.AddFile(fileName)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf("Failed to limit to single file %q: %v", remote, err)
}
}
@@ -135,7 +135,7 @@ func NewFsSrc(args []string) fs.Fs {
func newFsDir(remote string) fs.Fs {
f, err := cache.Get(remote)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return f
@@ -189,11 +189,11 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
fdst, err := cache.Get(dstRemote)
switch err {
case fs.ErrorIsFile:
fs.CountError(err)
_ = fs.CountError(err)
log.Fatalf("Source doesn't exist or is a directory and destination is a file")
case nil:
default:
fs.CountError(err)
_ = fs.CountError(err)
log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
}
return
@@ -239,7 +239,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
SigInfoHandler()
for try := 1; try <= *retries; try++ {
err = f()
fs.CountError(err)
err = fs.CountError(err)
lastErr := accounting.GlobalStats().GetLastError()
if err == nil {
err = lastErr
@@ -386,12 +386,12 @@ func initConfig() {
fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
atexit.Register(func() {
@@ -405,17 +405,17 @@ func initConfig() {
fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
err = pprof.WriteHeapProfile(f)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
err = f.Close()
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
})

View File

@@ -371,7 +371,12 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
if errc != 0 {
return errc
}
n, err := handle.WriteAt(buff, ofst)
var err error
if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
n, err = handle.WriteAt(buff, ofst)
} else {
n, err = handle.Write(buff)
}
if err != nil {
return translateError(err)
}
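The same branch appears in the bazil FUSE file handle later in this set. The reasoning: when the VFS cache backs writes and the handle was opened with `O_APPEND`, data must go through the sequential `Write` so it lands at the current end of file, while positional `WriteAt` stays correct for everything else. A sketch under those assumptions, with a hypothetical `Handle` stand-in for the VFS handle:

```go
package sketch

import "os"

// Handle is a hypothetical stand-in for the subset of the VFS handle
// API used by the FUSE write path.
type Handle interface {
	Write(p []byte) (n int, err error)
	WriteAt(p []byte, off int64) (n int, err error)
	Mode() os.FileMode
}

// writeToHandle mirrors the branch above: cache-backed O_APPEND handles
// write sequentially, everything else writes at the kernel's offset.
func writeToHandle(h Handle, buf []byte, off int64, cacheBacked bool) (int, error) {
	if !cacheBacked || h.Mode()&os.ModeAppend == 0 {
		return h.WriteAt(buf, off)
	}
	return h.Write(buf)
}
```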

View File

@@ -21,6 +21,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
@@ -207,7 +208,7 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
// If noModTime is set then it
func Mount(f fs.Fs, mountpoint string) error {
// Mount it
FS, errChan, _, err := mount(f, mountpoint)
FS, errChan, unmount, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
@@ -217,6 +218,10 @@ func Mount(f fs.Fs, mountpoint string) error {
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
atexit.Register(func() {
_ = unmount()
})
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")
}

View File

@@ -88,7 +88,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(ctx, hashType)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
return true, false
}
@@ -97,7 +97,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
}
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(dst, "Error computing hash: %v", err)
return true, false
}
@@ -106,7 +106,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
}
if cryptHash != underlyingHash {
err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(src, err.Error())
return true, false
}

View File

@@ -46,10 +46,11 @@ __rclone_custom_func() {
else
__rclone_init_completion -n : || return
fi
local rclone=(command rclone --ask-password=false)
if [[ $cur != *:* ]]; then
local ifs=$IFS
IFS=$'\n'
local remotes=($(command rclone listremotes))
local remotes=($("${rclone[@]}" listremotes 2> /dev/null))
IFS=$ifs
local remote
for remote in "${remotes[@]}"; do
@@ -68,7 +69,7 @@ __rclone_custom_func() {
fi
local ifs=$IFS
IFS=$'\n'
local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
local lines=($("${rclone[@]}" lsf "${cur%%:*}:$prefix" 2> /dev/null))
IFS=$ifs
local line
for line in "${lines[@]}"; do

View File

@@ -5,6 +5,7 @@ package mount
import (
"context"
"io"
"os"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
@@ -41,7 +42,12 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
n, err := fh.Handle.WriteAt(req.Data, req.Offset)
var n int
if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
n, err = fh.Handle.WriteAt(req.Data, req.Offset)
} else {
n, err = fh.Handle.Write(req.Data)
}
if err != nil {
return translateError(err)
}

View File

@@ -32,12 +32,10 @@ func mountOptions(device string) (options []fuse.MountOption) {
fuse.Subtype("rclone"),
fuse.FSName(device),
fuse.VolumeName(mountlib.VolumeName),
fuse.AsyncRead(),
// Options from benchmarking in the fuse module
//fuse.MaxReadahead(64 * 1024 * 1024),
//fuse.AsyncRead(), - FIXME this causes
// ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor
// which is probably related to errors people are having
//fuse.WritebackCache(),
}
if mountlib.NoAppleDouble {
@@ -139,6 +137,9 @@ func Mount(f fs.Fs, mountpoint string) error {
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
atexit.IgnoreSignals()
atexit.Register(func() {
_ = unmount()
})
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")

View File

@@ -50,6 +50,8 @@ func TestRenameOpenHandle(t *testing.T) {
err = file.Close()
require.NoError(t, err)
run.waitForWriters()
// verify file was renamed properly
run.checkDir(t, "renamebla 9")

View File

@@ -34,6 +34,11 @@ func osCreate(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}
// osAppend opens an existing file for appending (no create or truncate)
func osAppend(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_APPEND, 0666)
}
// TestFileModTimeWithOpenWriters tests mod time on open files
func TestFileModTimeWithOpenWriters(t *testing.T) {
run.skipIfNoFUSE(t)

View File

@@ -78,6 +78,7 @@ func RunTests(t *testing.T, fn MountFn) {
t.Run("TestWriteFileDoubleClose", TestWriteFileDoubleClose)
t.Run("TestWriteFileFsync", TestWriteFileFsync)
t.Run("TestWriteFileDup", TestWriteFileDup)
t.Run("TestWriteFileAppend", TestWriteFileAppend)
})
log.Printf("Finished test run with cache mode %v (ok=%v)", cacheMode, ok)
if !ok {

View File

@@ -2,6 +2,7 @@ package mounttest
import (
"os"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
@@ -130,3 +131,48 @@ func TestWriteFileDup(t *testing.T) {
run.waitForWriters()
run.rm(t, "to be synced")
}
// TestWriteFileAppend tests that O_APPEND works on cache backends >= writes
func TestWriteFileAppend(t *testing.T) {
run.skipIfNoFUSE(t)
if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
t.Skip("not supported on vfs-cache-mode < writes")
return
}
// TODO: Windows needs the v1.5 release of WinFsp to handle O_APPEND properly.
// Until it gets released, skip this test on Windows.
if runtime.GOOS == "windows" {
t.Skip("currently unsupported on Windows")
}
filepath := run.path("to be synced")
fh, err := osCreate(filepath)
require.NoError(t, err)
testData := []byte("0123456789")
appendData := []byte("10")
_, err = fh.Write(testData)
require.NoError(t, err)
err = fh.Close()
require.NoError(t, err)
fh, err = osAppend(filepath)
require.NoError(t, err)
_, err = fh.Write(appendData)
require.NoError(t, err)
err = fh.Close()
require.NoError(t, err)
info, err := os.Stat(filepath)
require.NoError(t, err)
require.EqualValues(t, len(testData)+len(appendData), info.Size())
run.waitForWriters()
run.rm(t, "to be synced")
}

View File

@@ -214,7 +214,7 @@ func withHeader(name string, value string, next http.Handler) http.Handler {
// serveError returns an http.StatusInternalServerError and logs the error
func serveError(what interface{}, w http.ResponseWriter, text string, err error) {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}

View File

@@ -15,7 +15,6 @@ import (
"strconv"
"sync"
ftp "github.com/goftp/server"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/proxy"
@@ -29,6 +28,7 @@ import (
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
ftp "goftp.io/server"
)
// Options contains options for the http Server
@@ -155,7 +155,7 @@ func newServer(f fs.Fs, opt *Options) (*server, error) {
PassivePorts: opt.PassivePorts,
Auth: s, // implemented by CheckPasswd method
Logger: &Logger{},
//TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts
//TODO implement a maximum of https://godoc.org/goftp.io/server#ServerOpts
}
s.srv = ftp.NewServer(ftpopt)
return s, nil
@@ -210,8 +210,8 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
// CheckPassword is called with the connection.
func findID(callerName []byte) (string, error) {
// Dump the stack in this format
// github.com/rclone/rclone/vendor/github.com/goftp/server.(*Conn).Serve(0xc0000b2680)
// /home/ncw/go/src/github.com/rclone/rclone/vendor/github.com/goftp/server/conn.go:116 +0x11d
// github.com/rclone/rclone/vendor/goftp.io/server.(*Conn).Serve(0xc0000b2680)
// /home/ncw/go/src/github.com/rclone/rclone/vendor/goftp.io/server/conn.go:116 +0x11d
buf := make([]byte, 4096)
n := runtime.Stack(buf, false)
buf = buf[:n]

View File

@@ -11,7 +11,6 @@ import (
"fmt"
"testing"
ftp "github.com/goftp/server"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
@@ -19,6 +18,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ftp "goftp.io/server"
)
const (

View File

@@ -68,7 +68,7 @@ func (d *Directory) AddEntry(remote string, isDir bool) {
// Error logs the error and if a ResponseWriter is given it writes a http.StatusInternalServerError
func Error(what interface{}, w http.ResponseWriter, text string, err error) {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
if w != nil {
http.Error(w, text+".", http.StatusInternalServerError)

View File

@@ -208,7 +208,10 @@ func (p *Proxy) call(user, pass string, passwordBytes []byte) (value interface{}
if err != nil {
return nil, false, err
}
pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.DefaultCost)
// The bcrypt cost is a compromise between security and speed. The password is looked up on every
// transaction for WebDAV so we store it lightly hashed. Most likely an attacker
// would find it easier to go after the unencrypted password in memory.
pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.MinCost)
if err != nil {
return nil, false, err
}
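To make the trade-off concrete, here is a runnable sketch of the hash-and-verify round trip using golang.org/x/crypto/bcrypt; `MinCost` keeps the per-transaction check cheap while still keeping the plain text out of the lookup table:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// MinCost keeps the per-request hashing cheap; the hash only guards
	// an in-memory lookup, not persisted credentials.
	pwHash, err := bcrypt.GenerateFromPassword([]byte("secret"), bcrypt.MinCost)
	if err != nil {
		panic(err)
	}
	// Every authenticated request re-checks the password like this.
	err = bcrypt.CompareHashAndPassword(pwHash, []byte("secret"))
	fmt.Println(err == nil) // true
}
```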

View File

@@ -271,7 +271,7 @@ func (s *server) postObject(w http.ResponseWriter, r *http.Request, remote strin
_, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, r.ContentLength, time.Now())
if err != nil {
accounting.Stats(r.Context()).Error(err)
err = accounting.Stats(r.Context()).Error(err)
fs.Errorf(remote, "Post request rcat error: %v", err)
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)

View File

@@ -192,7 +192,7 @@ Contributors
* Sheldon Rupp <me@shel.io>
* albertony <12441419+albertony@users.noreply.github.com>
* cron410 <cron410@gmail.com>
* Anagh Kumar Baranwal <anaghk.dos@gmail.com>
* Anagh Kumar Baranwal <anaghk.dos@gmail.com> <6824881+darthShadow@users.noreply.github.com>
* Felix Brucker <felix@felixbrucker.com>
* Santiago Rodríguez <scollazo@users.noreply.github.com>
* Craig Miskell <craig.miskell@fluxfederation.com>
@@ -263,7 +263,7 @@ Contributors
* garry415 <garry.415@gmail.com>
* forgems <forgems@gmail.com>
* Florian Apolloner <florian@apolloner.eu>
* Aleksandar Jankovic <office@ajankovic.com>
* Aleksandar Janković <office@ajankovic.com> <ajankovic@users.noreply.github.com>
* Maran <maran@protonmail.com>
* nguyenhuuluan434 <nguyenhuuluan434@gmail.com>
* Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev>
@@ -306,3 +306,13 @@ Contributors
* Carlos Ferreyra <crypticmind@gmail.com>
* Saksham Khanna <sakshamkhanna@outlook.com>
* dausruddin <5763466+dausruddin@users.noreply.github.com>
* zero-24 <zero-24@users.noreply.github.com>
* Xiaoxing Ye <ye@xiaoxing.us>
* Barry Muldrey <barry@muldrey.net>
* Sebastian Brandt <sebastian.brandt@friday.de>
* Marco Molteni <marco.molteni@mailbox.org>
* Ankur Gupta <ankur0493@gmail.com>
* Maciej Zimnoch <maciej@scylladb.com>
* anuar45 <serdaliyev.anuar@gmail.com>
* Fernando <ferferga@users.noreply.github.com>
* David Cole <david.cole@sohonet.com>

View File

@@ -1,11 +1,30 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2019-10-26"
date: "2019-11-19"
---
# Changelog
## v1.50.2 - 2019-11-19
* Bug Fixes
* accounting: Fix memory leak on retried operations (Nick Craig-Wood)
* Drive
* Fix listing of the root directory with drive.files scope (Nick Craig-Wood)
* Fix --drive-root-folder-id with team/shared drives (Nick Craig-Wood)
## v1.50.1 - 2019-11-02
* Bug Fixes
* hash: Fix accidentally changed hash names for `DropboxHash` and `CRC-32` (Nick Craig-Wood)
* fshttp: Fix error reporting on tpslimit token bucket errors (Nick Craig-Wood)
* fshttp: Don't print token bucket errors on context cancelled (Nick Craig-Wood)
* Local
* Fix listings of . on Windows (Nick Craig-Wood)
* Onedrive
* Fix DirMove/Move after Onedrive change (Xiaoxing Ye)
## v1.50.0 - 2019-10-26
* New backends

View File

@@ -22,7 +22,8 @@ rclone authorize [flags]
### Options
```
-h, --help help for authorize
--auth-no-open-browser Do not automatically open auth link in default browser
-h, --help help for authorize
```
See the [global flags page](/flags/) for global options not listed here.

View File

@@ -65,6 +65,28 @@ infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Archit
which creates drives accessible for everyone on the system or
alternatively using [the nssm service manager](https://nssm.cc/usage).
#### Mount as a network drive
By default, rclone will mount the remote as a normal drive. However, you can also mount it as a **Network Drive**
(or **Network Share**, as mentioned in some places).
Unlike other systems, Windows provides a different filesystem type for network drives.
Windows and other programs treat the network drives and fixed/removable drives differently:
In network drives, many I/O operations are optimized, as the high latency and low reliability
(compared to a normal drive) of a network is expected.
Although many people prefer network shares to be mounted as normal system drives, this might cause
some issues, such as programs not working as expected or freezes and errors while operating with the
mounted remote in Windows Explorer. If you experience any of those, consider mounting rclone remotes as network shares,
as Windows expects normal drives to be fast and reliable, while cloud storage is far from that.
See also the [Limitations](#limitations) section below for more info.
Add `--fuse-flag --VolumePrefix=\server\share` to your `mount` command, **replacing `share` with any other
name of your choice if you are mounting more than one remote**. Otherwise, the mountpoints will conflict and
your mounted filesystems will overlap.
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
### Limitations
Without the use of "--vfs-cache-mode" this can only write files

View File

@@ -191,3 +191,22 @@ If you have more than 10,000 files in a directory then `rclone purge
dropbox:dir` will return the error `Failed to purge: There are too
many files involved in this operation`. As a work-around do an
`rclone delete dropbox:dir` followed by an `rclone rmdir dropbox:dir`.
### Get your own Dropbox App ID ###
When you use rclone with Dropbox in its default configuration you are using rclone's App ID. This is shared between all the rclone users.
Here is how to create your own Dropbox App ID for rclone:
1. Log into the [Dropbox App console](https://www.dropbox.com/developers/apps/create) with your Dropbox Account (it need not
be the same account as the Dropbox you want to access)
2. Choose an API => Usually this should be `Dropbox API`
3. Choose the type of access you want to use => Full Dropbox or App Folder
4. Name your App
5. Click the button `Create App`
6. Find the `App key` and `App secret`. Use these values in rclone config to add a new remote or edit an existing remote.

View File

@@ -99,7 +99,7 @@ Or instead of htpassword if you just want a single user and password:
The GUI is being developed in the [rclone/rclone-webui-react repository](https://github.com/rclone/rclone-webui-react).
Bug reports and contributions very welcome welcome :-)
Bug reports and contributions are very welcome :-)
If you have questions then please ask them on the [rclone forum](https://forum.rclone.org/).

View File

@@ -56,7 +56,14 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
rclone config
## macOS installation from precompiled binary ##
## macOS installation with brew ##
brew install rclone
## macOS installation from precompiled binary, using curl ##
To avoid problems with the macOS gatekeeper requiring the binary to be signed and
notarized, it is enough to download with `curl`.
Download the latest version of rclone.
@@ -81,6 +88,19 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
rclone config
## macOS installation from precompiled binary, using a web browser ##
When downloading a binary with a web browser, the browser will set the macOS
gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
`rclone`, a pop-up will appear saying:
“rclone” cannot be opened because the developer cannot be verified.
macOS cannot verify that this app is free from malware.
The simplest fix is to run
xattr -d com.apple.quarantine rclone
## Install with docker ##
The rclone project maintains a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).

View File

@@ -11,7 +11,7 @@ Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
To configure Jottacloud you will need to enter your username and password and select a mountpoint.
To configure Jottacloud you will need to generate a personal security token in the Jottacloud web interface. You will find the option to do so in your [account security settings](https://www.jottacloud.com/web/secure). Note that the web interface may refer to this token as a JottaCli token.
Here is an example of how to make a remote called `remote`. First run:
@@ -42,16 +42,8 @@ n) No
y/n> n
Remote config
Do you want to create a machine specific API key?
Rclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.
y) Yes
n) No
y/n> y
Username> 0xC4KE@gmail.com
Your Jottacloud password is only required during setup and will not be stored.
password:
Generate a personal login token here: https://www.jottacloud.com/web/secure
Login Token> <your token here>
Do you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?
@@ -74,11 +66,10 @@ Mountpoints> 1
[jotta]
type = jottacloud
user = 0xC4KE@gmail.com
client_id = .....
client_secret = ........
token = {........}
device = Jotta
mountpoint = Archive
configVersion = 1
--------------------
y) Yes this is OK
e) Edit this remote
@@ -102,7 +93,7 @@ To copy a local directory to an Jottacloud directory called backup
### Devices and Mountpoints ###
The official Jottacloud client registers a device for each computer you install it on and then creates a mountpoint for each folder you select for Backup.
The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mounpoint however if you want to access files uploaded by the official rclone provides the option to select other devices and mountpoints during config.
The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mountpoint, however if you want to access files uploaded by any of the official clients rclone provides the option to select other devices and mountpoints during config.
### --fast-list ###

View File

@@ -340,6 +340,7 @@ Authentication is required for this call.
### config/get: Get a remote in the config file. {#config/get}
Parameters:
- name - name of remote to get
See the [config dump command](/commands/rclone_config_dump/) command for more information on the above.
@@ -482,6 +483,7 @@ If group is not provided then summed up stats for all groups will be
returned.
Parameters
- group - name of the stats group (string)
Returns the following values:
@@ -519,12 +521,12 @@ The value for "eta" is null if an eta cannot be determined.
### core/stats-reset: Reset stats. {#core/stats-reset}
This clears counters and errors for all stats or specific stats group if group
is provided.
This clears counters, errors and finished transfers for all stats or specific
stats group if group is provided.
Parameters
- group - name of the stats group (string)
```
### core/transferred: Returns stats about completed transfers. {#core/transferred}
@@ -538,6 +540,7 @@ returned.
Note only the last 100 completed transfers are returned.
Parameters
- group - name of the stats group (string)
Returns the following values:
@@ -561,6 +564,7 @@ Returns the following values:
### core/version: Shows the current version of rclone and the go runtime. {#core/version}
This shows the current version of go and the go runtime
- version - rclone version, eg "v1.44"
- decomposed - version number as [major, minor, patch, subpatch]
- note patch and subpatch will be 999 for a git compiled version
@@ -569,19 +573,60 @@ This shows the current version of go and the go runtime
- arch - cpu architecture in use according to Go
- goVersion - version of Go runtime in use
### debug/set-block-profile-rate: Set runtime.SetBlockProfileRate for blocking profiling. {#debug/set-block-profile-rate}
SetBlockProfileRate controls the fraction of goroutine blocking events
that are reported in the blocking profile. The profiler aims to sample
an average of one blocking event per rate nanoseconds spent blocked.
To include every blocking event in the profile, pass rate = 1. To turn
off profiling entirely, pass rate <= 0.
After calling this you can use the following to see the blocking profile:
go tool pprof http://localhost:5572/debug/pprof/block
Parameters
- rate - int
### debug/set-mutex-profile-fraction: Set runtime.SetMutexProfileFraction for mutex profiling. {#debug/set-mutex-profile-fraction}
SetMutexProfileFraction controls the fraction of mutex contention
events that are reported in the mutex profile. On average 1/rate
events are reported. The previous rate is returned.
To turn off profiling entirely, pass rate 0. To just read the current
rate, pass rate < 0. (For n>1 the details of sampling may change.)
Once this is set you can use this to profile the mutex contention:
go tool pprof http://localhost:5572/debug/pprof/mutex
Parameters
- rate - int
Results
- previousRate - int
### job/list: Lists the IDs of the running jobs {#job/list}
Parameters - None
Results
- jobids - array of integer job ids
### job/status: Reads the status of the job ID {#job/status}
Parameters
- jobid - id of the job (integer)
Results
- finished - boolean
- duration - time in seconds that the job ran for
- endTime - time the job finished (eg "2018-10-26T18:50:20.528746884+01:00")
@@ -596,6 +641,7 @@ Results
### job/stop: Stop the running job {#job/stop}
Parameters
- jobid - id of the job (integer)
### operations/about: Return the space used on the remote {#operations/about}
@@ -1189,13 +1235,20 @@ You can see a summary of profiles available at http://localhost:5572/debug/pprof
Here is how to use some of them:
* Memory: `go tool pprof http://localhost:5572/debug/pprof/heap`
* Go routines: `curl http://localhost:5572/debug/pprof/goroutine?debug=1`
* 30-second CPU profile: `go tool pprof http://localhost:5572/debug/pprof/profile`
* 5-second execution trace: `wget http://localhost:5572/debug/pprof/trace?seconds=5`
- Memory: `go tool pprof http://localhost:5572/debug/pprof/heap`
- Go routines: `curl http://localhost:5572/debug/pprof/goroutine?debug=1`
- 30-second CPU profile: `go tool pprof http://localhost:5572/debug/pprof/profile`
- 5-second execution trace: `wget http://localhost:5572/debug/pprof/trace?seconds=5`
- Goroutine blocking profile
- Enable first with: `rclone rc debug/set-block-profile-rate rate=1` ([docs](#debug/set-block-profile-rate))
- `go tool pprof http://localhost:5572/debug/pprof/block`
- Contended mutexes:
- Enable first with: `rclone rc debug/set-mutex-profile-fraction rate=1` ([docs](#debug/set-mutex-profile-fraction))
- `go tool pprof http://localhost:5572/debug/pprof/mutex`
See the [net/http/pprof docs](https://golang.org/pkg/net/http/pprof/)
for more info on how to use the profiling and for a general overview
see [the Go team's blog post on profiling go programs](https://blog.golang.org/profiling-go-programs).
The profiling hook is [zero overhead unless it is used](https://stackoverflow.com/q/26545159/164234).

View File

@@ -16,6 +16,7 @@ The S3 backend can be used with a number of different providers:
* {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
* {{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
* {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`

View File

@@ -70,6 +70,10 @@ func newAccountSizeName(stats *StatsInfo, in io.ReadCloser, size int64, name str
// WithBuffer - If the file is above a certain size it adds an Async reader
func (acc *Account) WithBuffer() *Account {
// if already have a buffer then just return
if acc.withBuf {
return acc
}
acc.withBuf = true
var buffers int
if acc.size >= int64(fs.Config.BufferSize) || acc.size == -1 {
@@ -118,14 +122,16 @@ func (acc *Account) StopBuffering() {
// async buffer (if any) and re-adding it
func (acc *Account) UpdateReader(in io.ReadCloser) {
acc.mu.Lock()
if acc.withBuf {
withBuf := acc.withBuf
if withBuf {
acc.StopBuffering()
acc.withBuf = false
}
acc.in = in
acc.close = in
acc.origIn = in
acc.closed = false
if acc.withBuf {
if withBuf {
acc.WithBuffer()
}
acc.mu.Unlock()
@@ -378,6 +384,7 @@ func (acc *Account) RemoteStats() (out rc.Params) {
percentageDone = int(100 * float64(a) / float64(b))
}
out["percentage"] = percentageDone
out["group"] = acc.stats.group
return out
}

View File

@@ -13,8 +13,8 @@ import (
"github.com/rclone/rclone/fs/rc"
)
// Maximum number of completed transfers in startedTransfers list
const maxCompletedTransfers = 100
// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
var MaxCompletedTransfers = 100
// StatsInfo accounts all transfers
type StatsInfo struct {
@@ -40,6 +40,7 @@ type StatsInfo struct {
startedTransfers []*Transfer // currently active transfers
oldTimeRanges timeRanges // a merged list of time ranges for the transfers
oldDuration time.Duration // duration of transfers we have culled
group string
}
// NewStats creates an initialised StatsInfo
@@ -291,7 +292,7 @@ func (s *StatsInfo) String() string {
}
}
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
dateString,
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -312,16 +313,23 @@ func (s *StatsInfo) String() string {
errorDetails = " (no need to retry)"
}
_, _ = fmt.Fprintf(buf, `
Errors: %10d%s
Checks: %10d / %d, %s
Transferred: %10d / %d, %s
Elapsed time: %10v
`,
s.errors, errorDetails,
s.checks, totalChecks, percent(s.checks, totalChecks),
s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
dtRounded)
// Add only non zero stats
if s.errors != 0 {
_, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
s.errors, errorDetails)
}
if s.checks != 0 || totalChecks != 0 {
_, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n",
s.checks, totalChecks, percent(s.checks, totalChecks))
}
if s.deletes != 0 {
_, _ = fmt.Fprintf(buf, "Deleted: %10d\n", s.deletes)
}
if s.transfers != 0 || totalTransfer != 0 {
_, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n",
s.transfers, totalTransfer, percent(s.transfers, totalTransfer))
}
_, _ = fmt.Fprintf(buf, "Elapsed time: %10v\n", dtRounded)
}
// checking and transferring have their own locking so unlock
@@ -331,10 +339,10 @@ Elapsed time: %10v
// Add per transfer stats if required
if !fs.Config.StatsOneLine {
if !s.checking.empty() {
_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.inProgress))
_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.inProgress, s.transferring))
}
if !s.transferring.empty() {
_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.inProgress))
_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.inProgress, nil))
}
}
@@ -474,14 +482,16 @@ func (s *StatsInfo) Errored() bool {
}
// Error adds a single error into the stats, assigns lastError and eventually sets fatalError or retryError
func (s *StatsInfo) Error(err error) {
if err == nil {
return
func (s *StatsInfo) Error(err error) error {
if err == nil || fserrors.IsCounted(err) {
return err
}
s.mu.Lock()
defer s.mu.Unlock()
s.errors++
s.lastError = err
err = fserrors.FsError(err)
fserrors.Count(err)
switch {
case fserrors.IsFatalError(err):
s.fatalError = true
@@ -494,6 +504,7 @@ func (s *StatsInfo) Error(err error) {
case !fserrors.IsNoRetryError(err):
s.retryError = true
}
return err
}
// RetryAfter returns the time to retry after if it is set. It will
@@ -623,11 +634,29 @@ func (s *StatsInfo) RemoveTransfer(transfer *Transfer) {
s.mu.Unlock()
}
// PruneTransfers makes sure there aren't too many old transfers
// PruneAllTransfers removes all finished transfers.
func (s *StatsInfo) PruneAllTransfers() {
s.mu.Lock()
for i := 0; i < len(s.startedTransfers); i++ {
tr := s.startedTransfers[i]
if tr.IsDone() {
s.removeTransfer(tr, i)
// the i'th element was removed, so step the index back to avoid skipping the next element
i--
}
}
s.mu.Unlock()
}
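The `i--` here is the usual remove-while-iterating correction: deleting index `i` slides the next element into slot `i`, so the index has to step back or that element is skipped. A standalone illustration:

```go
package main

import "fmt"

func main() {
	xs := []int{1, 2, 2, 3, 2}
	for i := 0; i < len(xs); i++ {
		if xs[i] == 2 {
			// remove xs[i]; the next element slides into slot i,
			// so step back to examine it on the next iteration
			xs = append(xs[:i], xs[i+1:]...)
			i--
		}
	}
	fmt.Println(xs) // [1 3]
}
```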
// PruneTransfers makes sure there aren't too many old transfers by removing
// single finished transfer.
func (s *StatsInfo) PruneTransfers() {
if MaxCompletedTransfers < 0 {
return
}
s.mu.Lock()
// remove a transfer from the start if we are over quota
if len(s.startedTransfers) > maxCompletedTransfers+fs.Config.Transfers {
if len(s.startedTransfers) > MaxCompletedTransfers+fs.Config.Transfers {
for i, tr := range s.startedTransfers {
if tr.IsDone() {
s.removeTransfer(tr, i)

View File

@@ -59,8 +59,10 @@ func resetStats(ctx context.Context, in rc.Params) (rc.Params, error) {
}
if group != "" {
groups.get(group).ResetCounters()
groups.get(group).ResetErrors()
stats := groups.get(group)
stats.ResetCounters()
stats.ResetErrors()
stats.PruneAllTransfers()
} else {
groups.clear()
}
@@ -88,6 +90,7 @@ If group is not provided then summed up stats for all groups will be
returned.
Parameters
- group - name of the stats group (string)
Returns the following values:
@@ -140,6 +143,7 @@ returned.
Note only the last 100 completed transfers are returned.
Parameters
- group - name of the stats group (string)
Returns the following values:
@@ -188,12 +192,12 @@ Returns the following values:
Fn: resetStats,
Title: "Reset stats.",
Help: `
This clears counters and errors for all stats or specific stats group if group
is provided.
This clears counters, errors and finished transfers for all stats or specific
stats group if group is provided.
Parameters
- group - name of the stats group (string)
` + "```" + `
`,
})
}
@@ -243,6 +247,7 @@ func GlobalStats() *StatsInfo {
// NewStatsGroup creates new stats under named group.
func NewStatsGroup(group string) *StatsInfo {
stats := NewStats()
stats.group = group
groups.set(group, stats)
return stats
}
@@ -330,6 +335,7 @@ func (sg *statsGroups) clear() {
for _, stats := range sg.m {
stats.ResetErrors()
stats.ResetCounters()
stats.PruneAllTransfers()
}
sg.m = make(map[string]*StatsInfo)

View File

@@ -78,7 +78,7 @@ func TestStatsError(t *testing.T) {
t0 := time.Now()
t1 := t0.Add(time.Second)
s.Error(nil)
_ = s.Error(nil)
assert.Equal(t, int64(0), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.False(t, s.HadRetryError())
@@ -86,7 +86,7 @@ func TestStatsError(t *testing.T) {
assert.Equal(t, nil, s.GetLastError())
assert.False(t, s.Errored())
s.Error(io.EOF)
_ = s.Error(io.EOF)
assert.Equal(t, int64(1), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.True(t, s.HadRetryError())
@@ -95,7 +95,7 @@ func TestStatsError(t *testing.T) {
assert.True(t, s.Errored())
e := fserrors.ErrorRetryAfter(t0)
s.Error(e)
_ = s.Error(e)
assert.Equal(t, int64(2), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.True(t, s.HadRetryError())
@@ -103,14 +103,14 @@ func TestStatsError(t *testing.T) {
assert.Equal(t, e, s.GetLastError())
err := errors.Wrap(fserrors.ErrorRetryAfter(t1), "potato")
s.Error(err)
err = s.Error(err)
assert.Equal(t, int64(3), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.True(t, s.HadRetryError())
assert.Equal(t, t1, s.RetryAfter())
assert.Equal(t, t1, fserrors.RetryAfterErrorTime(err))
s.Error(fserrors.FatalError(io.EOF))
_ = s.Error(fserrors.FatalError(io.EOF))
assert.Equal(t, int64(4), s.GetErrors())
assert.True(t, s.HadFatalError())
assert.True(t, s.HadRetryError())
@@ -124,7 +124,7 @@ func TestStatsError(t *testing.T) {
assert.Equal(t, nil, s.GetLastError())
assert.False(t, s.Errored())
s.Error(fserrors.NoRetryError(io.EOF))
_ = s.Error(fserrors.NoRetryError(io.EOF))
assert.Equal(t, int64(1), s.GetErrors())
assert.False(t, s.HadFatalError())
assert.False(t, s.HadRetryError())
@@ -382,10 +382,61 @@ func TestTimeRangeDuration(t *testing.T) {
}
func TestPruneTransfers(t *testing.T) {
max := maxCompletedTransfers + fs.Config.Transfers
for _, test := range []struct {
Name string
Transfers int
Limit int
ExpectedStartedTransfers int
}{
{
Name: "Limited number of StartedTransfers",
Limit: 100,
Transfers: 200,
ExpectedStartedTransfers: 100 + fs.Config.Transfers,
},
{
Name: "Unlimited number of StartedTransfers",
Limit: -1,
Transfers: 200,
ExpectedStartedTransfers: 200,
},
} {
t.Run(test.Name, func(t *testing.T) {
prevLimit := MaxCompletedTransfers
MaxCompletedTransfers = test.Limit
defer func() { MaxCompletedTransfers = prevLimit }()
s := NewStats()
for i := int64(1); i <= int64(test.Transfers); i++ {
s.AddTransfer(&Transfer{
startedAt: time.Unix(i, 0),
completedAt: time.Unix(i+1, 0),
})
}
s.mu.Lock()
assert.Equal(t, time.Duration(test.Transfers)*time.Second, s.totalDuration())
assert.Equal(t, test.Transfers, len(s.startedTransfers))
s.mu.Unlock()
for i := 0; i < test.Transfers; i++ {
s.PruneTransfers()
}
s.mu.Lock()
assert.Equal(t, time.Duration(test.Transfers)*time.Second, s.totalDuration())
assert.Equal(t, test.ExpectedStartedTransfers, len(s.startedTransfers))
s.mu.Unlock()
})
}
}
func TestPruneAllTransfers(t *testing.T) {
const transfers = 10
s := NewStats()
for i := int64(1); i <= int64(max+100); i++ {
for i := int64(1); i <= int64(transfers); i++ {
s.AddTransfer(&Transfer{
startedAt: time.Unix(i, 0),
completedAt: time.Unix(i+1, 0),
@@ -393,17 +444,12 @@ func TestPruneTransfers(t *testing.T) {
}
s.mu.Lock()
assert.Equal(t, time.Duration(max+100)*time.Second, s.totalDuration())
assert.Equal(t, max+100, len(s.startedTransfers))
assert.Equal(t, transfers, len(s.startedTransfers))
s.mu.Unlock()
for i := 0; i < 200; i++ {
s.PruneTransfers()
}
s.PruneAllTransfers()
s.mu.Lock()
assert.Equal(t, time.Duration(max+100)*time.Second, s.totalDuration())
assert.Equal(t, max, len(s.startedTransfers))
assert.Empty(t, s.startedTransfers)
s.mu.Unlock()
}

View File

@@ -63,12 +63,21 @@ func (ss *stringSet) count() int {
return len(ss.items)
}
// String returns string representation of set items.
func (ss *stringSet) String(progress *inProgress) string {
// String returns string representation of set items excluding any in
// exclude (if set).
func (ss *stringSet) String(progress *inProgress, exclude *stringSet) string {
ss.mu.RLock()
defer ss.mu.RUnlock()
strngs := make([]string, 0, len(ss.items))
for name := range ss.items {
if exclude != nil {
exclude.mu.RLock()
_, found := exclude.items[name]
exclude.mu.RUnlock()
if found {
continue
}
}
var out string
if acc := progress.get(name); acc != nil {
out = acc.String()

View File

@@ -18,6 +18,7 @@ type TransferSnapshot struct {
StartedAt time.Time `json:"started_at"`
CompletedAt time.Time `json:"completed_at,omitempty"`
Error error `json:"-"`
Group string `json:"group"`
}
// MarshalJSON implements json.Marshaler interface.
@@ -26,6 +27,7 @@ func (as TransferSnapshot) MarshalJSON() ([]byte, error) {
if as.Error != nil {
err = as.Error.Error()
}
type Alias TransferSnapshot
return json.Marshal(&struct {
Error string `json:"error"`
@@ -84,7 +86,7 @@ func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking
// Must be called after transfer is finished to run proper cleanups.
func (tr *Transfer) Done(err error) {
if err != nil {
tr.stats.Error(err)
err = tr.stats.Error(err)
tr.mu.Lock()
tr.err = err
@@ -176,5 +178,6 @@ func (tr *Transfer) Snapshot() TransferSnapshot {
StartedAt: tr.startedAt,
CompletedAt: tr.completedAt,
Error: tr.err,
Group: tr.stats.group,
}
}

View File

@@ -7,6 +7,7 @@ import (
"sync"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
)
// io related errors returned by ChunkedReader
@@ -215,12 +216,12 @@ func (cr *ChunkedReader) openRange() error {
var err error
if length <= 0 {
if offset == 0 {
rc, err = cr.o.Open(cr.ctx)
rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)})
} else {
rc, err = cr.o.Open(cr.ctx, &fs.RangeOption{Start: offset, End: -1})
rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)}, &fs.RangeOption{Start: offset, End: -1})
}
} else {
rc, err = cr.o.Open(cr.ctx, &fs.RangeOption{Start: offset, End: offset + length - 1})
rc, err = cr.o.Open(cr.ctx, &fs.HashesOption{Hashes: hash.Set(hash.None)}, &fs.RangeOption{Start: offset, End: offset + length - 1})
}
if err != nil {
return err
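All three `Open` calls now pass a `HashesOption` of `hash.None`: a chunked or ranged read can never verify a whole-file checksum, so asking the backend to hash the stream is wasted effort. A minimal sketch of a ranged open along these lines:

```go
package sketch

import (
	"context"
	"io"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// openRange opens the byte range [off, end] of an object with hashing
// disabled, since a partial read cannot produce a whole-file hash.
func openRange(ctx context.Context, o fs.Object, off, end int64) (io.ReadCloser, error) {
	return o.Open(ctx,
		&fs.HashesOption{Hashes: hash.Set(hash.None)},
		&fs.RangeOption{Start: off, End: end},
	)
}
```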

View File

@@ -32,7 +32,7 @@ var (
//
// This is a function pointer to decouple the config
// implementation from the fs
CountError = func(err error) {}
CountError = func(err error) error { return nil }
// ConfigProvider is the config key used for provider options
ConfigProvider = "provider"
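This signature change drives the mechanical `err = fs.CountError(err)` edits throughout this set: the stub shown here is replaced at startup by the accounting layer, whose implementation wraps the error so it is counted exactly once and hands the wrapped value back. A sketch of the call-site idiom (`doSomething` is a placeholder):

```go
package sketch

import (
	"errors"

	"github.com/rclone/rclone/fs"
)

func doSomething() error { return errors.New("boom") }

// example shows the new calling convention: reassign the result of
// CountError so the counted (wrapped) error is what travels onward.
func example() error {
	err := doSomething()
	if err != nil {
		err = fs.CountError(err)
		fs.Errorf(nil, "operation failed: %v", err)
	}
	return err
}
```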

View File

@@ -62,6 +62,9 @@ const (
// ConfigAuthorize indicates that we just want "rclone authorize"
ConfigAuthorize = "config_authorize"
// ConfigAuthNoBrowser indicates that we do not want to open browser
ConfigAuthNoBrowser = "config_auth_no_browser"
)
// Global
@@ -572,7 +575,7 @@ func SetValueAndSave(name, key, value string) (err error) {
_, err = reloadedConfigFile.GetSection(name)
if err != nil {
// Section doesn't exist yet so ignore reload
return err
return nil
}
// Update the config file with the reloaded version
configFile = reloadedConfigFile
@@ -635,11 +638,16 @@ func ReadNonEmptyLine(prompt string) string {
return result
}
// Command - choose one
func Command(commands []string) byte {
// CommandDefault - choose one. If return is pressed then it will
// chose the defaultIndex if it is >= 0
func CommandDefault(commands []string, defaultIndex int) byte {
opts := []string{}
for _, text := range commands {
fmt.Printf("%c) %s\n", text[0], text[1:])
for i, text := range commands {
def := ""
if i == defaultIndex {
def = " (default)"
}
fmt.Printf("%c) %s%s\n", text[0], text[1:], def)
opts = append(opts, text[:1])
}
optString := strings.Join(opts, "")
@@ -647,6 +655,9 @@ func Command(commands []string) byte {
for {
fmt.Printf("%s> ", optHelp)
result := strings.ToLower(ReadLine())
if len(result) == 0 && defaultIndex >= 0 {
return optString[defaultIndex]
}
if len(result) != 1 {
continue
}
@@ -657,11 +668,20 @@ func Command(commands []string) byte {
}
}
// Command - choose one
func Command(commands []string) byte {
return CommandDefault(commands, -1)
}
// Confirm asks the user for Yes or No and returns true or false
//
// If AutoConfirm is set, it will return true
func Confirm() bool {
return Command([]string{"yYes", "nNo"}) == 'y'
// If the user presses enter then the Default will be used
func Confirm(Default bool) bool {
defaultIndex := 0
if !Default {
defaultIndex = 1
}
return CommandDefault([]string{"yYes", "nNo"}, defaultIndex) == 'y'
}
// ConfirmWithConfig asks the user for Yes or No and returns true or
@@ -688,7 +708,7 @@ func ConfirmWithConfig(m configmap.Getter, configName string, Default bool) bool
fmt.Printf("Auto confirm is set: answering %s, override by setting config parameter %s=%v\n", answer, configName, !Default)
return Default
}
return Confirm()
return Confirm(Default)
}
// Choose one of the defaults or type a new string if newOk is set
@@ -797,7 +817,7 @@ func ShowRemote(name string) {
// OkRemote prints the contents of the remote and ask if it is OK
func OkRemote(name string) bool {
ShowRemote(name)
switch i := Command([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}); i {
switch i := CommandDefault([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}, 0); i {
case 'y':
return true
case 'e':
@@ -867,12 +887,14 @@ func ChooseOption(o *fs.Option, name string) string {
fmt.Println(o.Help)
if o.IsPassword {
actions := []string{"yYes type in my own password", "gGenerate random password"}
defaultAction := -1
if !o.Required {
defaultAction = len(actions)
actions = append(actions, "nNo leave this optional password blank")
}
var password string
var err error
switch i := Command(actions); i {
switch i := CommandDefault(actions, defaultAction); i {
case 'y':
password = ChangePassword("the")
case 'g':
@@ -887,7 +909,7 @@ func ChooseOption(o *fs.Option, name string) string {
fmt.Printf("Use this password? Please note that an obscured version of this \npassword (and not the " +
"password itself) will be stored under your \nconfiguration file, so keep this generated password " +
"in a safe place.\n")
if Confirm() {
if Confirm(true) {
break
}
}
@@ -1062,12 +1084,17 @@ func fsOption() *fs.Option {
return o
}
// NewRemoteName asks the user for a name for a remote
// NewRemoteName asks the user for a name for a new remote
func NewRemoteName() (name string) {
for {
fmt.Printf("name> ")
name = ReadLine()
err := fspath.CheckConfigName(name)
_, err := getConfigData().GetSection(name)
if err == nil {
fmt.Printf("Remote %q already exists.\n", name)
continue
}
err = fspath.CheckConfigName(name)
switch {
case name == "":
fmt.Printf("Can't use empty name.\n")
@@ -1092,7 +1119,7 @@ func editOptions(ri *fs.RegInfo, name string, isNew bool) {
break
}
fmt.Printf("Edit advanced config? (y/n)\n")
if !Confirm() {
if !Confirm(false) {
break
}
}
@@ -1107,7 +1134,7 @@ func editOptions(ri *fs.RegInfo, name string, isNew bool) {
if !isNew {
fmt.Printf("Value %q = %q\n", option.Name, FileGet(name, option.Name))
fmt.Printf("Edit? (y/n)>\n")
if !Confirm() {
if !Confirm(false) {
continue
}
}
@@ -1299,7 +1326,7 @@ func SetPassword() {
//
// rclone authorize "fs name"
// rclone authorize "fs name" "client id" "client secret"
func Authorize(args []string) {
func Authorize(args []string, noAutoBrowser bool) {
defer suppressConfirm()()
switch len(args) {
case 1, 3:
@@ -1319,10 +1346,15 @@ func Authorize(args []string) {
// Indicate that we are running rclone authorize
getConfigData().SetValue(name, ConfigAuthorize, "true")
if noAutoBrowser {
getConfigData().SetValue(name, ConfigAuthNoBrowser, "true")
}
if len(args) == 3 {
getConfigData().SetValue(name, ConfigClientID, args[1])
getConfigData().SetValue(name, ConfigClientSecret, args[2])
}
m := fs.ConfigMap(f, name)
f.Config(name, m)
}

View File

@@ -115,14 +115,6 @@ func TestCRUD(t *testing.T) {
assert.Equal(t, "true", FileGet("asdf", "bool"))
assert.Equal(t, "secret", obscure.MustReveal(FileGet("asdf", "pass")))
// no-op rename, asdf → asdf
RenameRemote("asdf")
assert.Equal(t, []string{"asdf"}, configFile.GetSectionList())
assert.Equal(t, "config_test_remote", FileGet("asdf", "type"))
assert.Equal(t, "true", FileGet("asdf", "bool"))
assert.Equal(t, "secret", obscure.MustReveal(FileGet("asdf", "pass")))
// delete remote
DeleteRemote("asdf")
assert.Equal(t, []string{}, configFile.GetSectionList())
@@ -163,6 +155,28 @@ func TestChooseOption(t *testing.T) {
assert.Equal(t, "", FileGet("test", "pass"))
}
func TestNewRemoteName(t *testing.T) {
defer testConfigFile(t, "crud.conf")()
// script for creating remote
ReadLine = makeReadLine([]string{
"config_test_remote", // type
"true", // bool value
"n", // not required
"y", // looks good, save
})
NewRemote("test")
ReadLine = makeReadLine([]string{
"test", // already exists
"", // empty string not allowed
"bad@characters", // bad characters
"newname", // OK
})
assert.Equal(t, "newname", NewRemoteName())
}
func TestCreateUpatePasswordRemote(t *testing.T) {
defer testConfigFile(t, "update.conf")()

View File

@@ -69,8 +69,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &fs.Config.IgnoreCaseSync, "ignore-case-sync", "", fs.Config.IgnoreCaseSync, "Ignore case when synchronizing")
flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "use DIR to server side copy flies from.")
flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Compare dest to DIR also.")
flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "Include additional server-side path during comparison.")
flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Implies --compare-dest but also copies files from path into destination.")
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix to add to changed files.")
flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.")

View File

@@ -37,6 +37,7 @@ func init() {
AuthRequired: true,
Help: `
Parameters:
- name - name of remote to get
See the [config dump command](/commands/rclone_config_dump/) command for more information on the above.

View File

@@ -178,6 +178,53 @@ func IsNoRetryError(err error) (isNoRetry bool) {
return
}
// NoLowLevelRetrier is an optional interface for error as to whether
// the operation should not be retried at a low level.
//
// NoLowLevelRetry errors won't be retried by low level retry loops.
type NoLowLevelRetrier interface {
error
NoLowLevelRetry() bool
}
// wrappedNoLowLevelRetryError is an error wrapped so it will satisfy the
// NoLowLevelRetrier interface and return true
type wrappedNoLowLevelRetryError struct {
error
}
// NoLowLevelRetry interface
func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool {
return true
}
// Check interface
var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)}
// NoLowLevelRetryError makes an error which indicates the sync
// shouldn't be low level retried.
func NoLowLevelRetryError(err error) error {
return wrappedNoLowLevelRetryError{err}
}
// Cause returns the underlying error
func (err wrappedNoLowLevelRetryError) Cause() error {
return err.error
}
// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry
// interface and calling the NoLowLevelRetry method returns true.
func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(NoLowLevelRetrier); ok {
isNoLowLevelRetry = r.NoLowLevelRetry()
return true
}
return false
})
return
}
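Usage is two-sided: code that knows a failure cannot be fixed by re-reading wraps it, and the low level retry loop checks for the marker (see the `ShouldRetry` change at the end of this hunk). A sketch, with an illustrative error text standing in for the source-file-updated case:

```go
package sketch

import (
	"errors"

	"github.com/rclone/rclone/fs/fserrors"
)

// illustrative only: the local backend wraps its own error like this
var errSourceChanged = errors.New("source file is being updated")

// openForRead marks the failure so the low level retry loop gives up
// immediately instead of re-reading a file that keeps changing.
func openForRead() error {
	return fserrors.NoLowLevelRetryError(errSourceChanged)
}

func retryLoop() error {
	err := openForRead()
	if fserrors.IsNoLowLevelRetryError(err) {
		return err // give up: retrying at this level cannot help
	}
	// ... otherwise the caller would retry up to --low-level-retries times
	return err
}
```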
// RetryAfter is an optional interface for error as to whether the
// operation should be retried after a given delay
//
@@ -230,6 +277,64 @@ func IsRetryAfterError(err error) bool {
return !RetryAfterErrorTime(err).IsZero()
}
// CountableError is an optional interface for error. It stores a boolean
// which signifies if the error has already been counted or not
type CountableError interface {
error
Count()
IsCounted() bool
}
// wrappedCountableError is an error wrapped so it will satisfy the
// CountableError interface
type wrappedCountableError struct {
error
isCounted bool
}
// CountableError interface
func (err *wrappedCountableError) Count() {
err.isCounted = true
}
// CountableError interface
func (err *wrappedCountableError) IsCounted() bool {
return err.isCounted
}
func (err *wrappedCountableError) Cause() error {
return err.error
}
// IsCounted returns true if err conforms to the CountableError interface
// and has already been counted
func IsCounted(err error) bool {
if r, ok := err.(CountableError); ok {
return r.IsCounted()
}
return false
}
// Count sets the isCounted variable on the error if it conforms to the
// CountableError interface
func Count(err error) {
if r, ok := err.(CountableError); ok {
r.Count()
}
}
// Check interface
var _ CountableError = &wrappedCountableError{error: error(nil)}
// FsError makes an error which can keep a record that it is already counted
// or not
func FsError(err error) error {
if err == nil {
err = errors.New("countable error")
}
return &wrappedCountableError{error: err}
}
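Putting the pieces together: `FsError` wraps an error with a counted flag, `Count` sets it, and `IsCounted` is what lets `StatsInfo.Error` earlier in this set skip errors that were already tallied. A short sketch of the flow:

```go
package sketch

import (
	"errors"

	"github.com/rclone/rclone/fs/fserrors"
)

func countOnce() bool {
	err := fserrors.FsError(errors.New("boom"))
	fserrors.Count(err)            // the first tally marks the error
	return fserrors.IsCounted(err) // true: later tallies can be skipped
}
```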
// Cause is a souped up errors.Cause which can unwrap some standard
// library errors too. It returns true if any of the intermediate
// errors had a Timeout() or Temporary() method which returned true.
@@ -287,6 +392,11 @@ func ShouldRetry(err error) bool {
return false
}
// If error has been marked to NoLowLevelRetry then don't retry
if IsNoLowLevelRetryError(err) {
return false
}
// Find root cause if available
retriable, err := Cause(err)
if retriable {

View File

@@ -318,8 +318,8 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
// Get transactions per second token first if limiting
if tpsBucket != nil {
tbErr := tpsBucket.Wait(req.Context())
if tbErr != nil {
fs.Errorf(nil, "HTTP token bucket error: %v", err)
if tbErr != nil && tbErr != context.Canceled {
fs.Errorf(nil, "HTTP token bucket error: %v", tbErr)
}
}
// Force user agent

View File
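The RoundTrip fix above stops logging the wrong variable and treats a cancelled context as normal shutdown rather than a token bucket failure. A sketch of the pattern, assuming the golang.org/x/time/rate limiter that backs tpsBucket:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One token per transaction, refilled at 2 tokens/second.
	tpsBucket := rate.NewLimiter(rate.Limit(2), 1)

	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(300 * time.Millisecond)
		cancel() // simulate the request being cancelled mid-wait
	}()

	for i := 0; ; i++ {
		if tbErr := tpsBucket.Wait(ctx); tbErr != nil {
			// As in the fix: a cancelled context is expected, not loggable.
			if tbErr != context.Canceled {
				fmt.Println("HTTP token bucket error:", tbErr)
			}
			return
		}
		fmt.Println("transaction", i)
	}
}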

@@ -66,7 +66,7 @@ func init() {
MD5 = RegisterHash("MD5", 32, md5.New)
SHA1 = RegisterHash("SHA-1", 40, sha1.New)
Whirlpool = RegisterHash("Whirlpool", 128, whirlpool.New)
CRC32 = RegisterHash("CRC32", 8, func() hash.Hash { return crc32.NewIEEE() })
CRC32 = RegisterHash("CRC-32", 8, func() hash.Hash { return crc32.NewIEEE() })
}
// Supported returns a set of all the supported hashes by

View File
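The rename above only touches the display name; the registered width of 8 still matches the hex encoding of a 32-bit checksum, as a quick sketch shows:

package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	sum := crc32.ChecksumIEEE([]byte("hello"))
	fmt.Printf("%08x\n", sum) // always 8 hex digits, hence the width of 8
}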

@@ -393,14 +393,14 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
wg.Wait()
if srcListErr != nil {
fs.Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
fs.CountError(srcListErr)
srcListErr = fs.CountError(srcListErr)
return nil, srcListErr
}
if dstListErr == fs.ErrorDirNotFound {
// Copy the stuff anyway
} else if dstListErr != nil {
fs.Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
fs.CountError(dstListErr)
dstListErr = fs.CountError(dstListErr)
return nil, dstListErr
}

View File
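The march change above is one instance of a pattern applied throughout this commit: fs.CountError now returns the error it was given, possibly wrapped as a counted error, so call sites keep the result instead of discarding it. A minimal sketch of the new call-site shape:

package main

import (
	"errors"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// listSource is a stand-in for any operation whose failure should be counted.
func listSource() error { return errors.New("boom") }

func main() {
	if err := listSource(); err != nil {
		// Keep the returned value: it may be a wrapped, counted error
		// rather than the original.
		err = fs.CountError(err)
		fmt.Println("error reading source directory:", err)
	}
}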

@@ -34,7 +34,7 @@ outer:
_, err := f.NewObject(ctx, newName)
for ; err != fs.ErrorObjectNotFound; suffix++ {
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(o, "Failed to check for existing object: %v", err)
continue outer
}
@@ -48,7 +48,7 @@ outer:
if !fs.Config.DryRun {
newObj, err := doMove(ctx, o, newName)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(o, "Failed to rename: %v", err)
continue
}
@@ -211,12 +211,18 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, er
if err != nil {
return nil, errors.Wrap(err, "find duplicate dirs")
}
duplicateDirs := [][]fs.Directory{}
for _, ds := range dirs {
// make sure parents are before children
duplicateNames := []string{}
for name, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
duplicateNames = append(duplicateNames, name)
}
}
sort.Strings(duplicateNames)
duplicateDirs := [][]fs.Directory{}
for _, name := range duplicateNames {
duplicateDirs = append(duplicateDirs, dirs[name])
}
return duplicateDirs, nil
}
@@ -235,7 +241,8 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
fs.Infof(dirs[0], "Merging contents of duplicate directories")
err := mergeDirs(ctx, dirs)
if err != nil {
return errors.Wrap(err, "merge duplicate dirs")
err = fs.CountError(err)
fs.Errorf(nil, "merge duplicate dirs: %v", err)
}
} else {
fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")
@@ -251,23 +258,16 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
fs.Infof(f, "Looking for duplicates using %v mode.", mode)
// Find duplicate directories first and fix them - repeat
// until all fixed
for {
duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
if err != nil {
return err
}
if len(duplicateDirs) == 0 {
break
}
// Find duplicate directories first and fix them
duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
if err != nil {
return err
}
if len(duplicateDirs) != 0 {
err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
if err != nil {
return err
}
if fs.Config.DryRun {
break
}
}
// find a hash to use
@@ -275,7 +275,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
// Now find duplicate files
files := map[string][]fs.Object{}
err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(o fs.Object) {
remote := o.Remote()
files[remote] = append(files[remote], o)

View File
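The dedupe rewrite above leans on lexicographic order to merge parents before their children, since a directory name is a strict prefix of its children's names. A small illustration:

package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"a/b/c", "a", "a/b"}
	sort.Strings(names)
	fmt.Println(names) // [a a/b a/b/c]: every parent precedes its children
}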

@@ -63,7 +63,7 @@ func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.
g.Go(func() (err error) {
srcHash, err = src.Hash(ctx, ht)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(src, "Failed to calculate src hash: %v", err)
}
return err
@@ -71,7 +71,7 @@ func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.
g.Go(func() (err error) {
dstHash, err = dst.Hash(ctx, ht)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(dst, "Failed to calculate dst hash: %v", err)
}
return err
@@ -234,7 +234,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
}
return false
} else if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(dst, "Failed to set modification time: %v", err)
} else {
fs.Infof(src, "Updated modification time in destination")
@@ -408,7 +408,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
break
}
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(src, "Failed to copy: %v", err)
return newDst, err
}
@@ -417,7 +417,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
if sizeDiffers(src, dst) {
err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
fs.Errorf(dst, "%v", err)
fs.CountError(err)
err = fs.CountError(err)
removeFailedCopy(ctx, dst)
return newDst, err
}
@@ -429,7 +429,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
if !equal {
err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
fs.Errorf(dst, "%v", err)
fs.CountError(err)
err = fs.CountError(err)
removeFailedCopy(ctx, dst)
return newDst, err
}
@@ -492,7 +492,7 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
case fs.ErrorCantMove:
fs.Debugf(src, "Can't move, switching to copy")
default:
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(src, "Couldn't move: %v", err)
return newDst, err
}
@@ -558,8 +558,8 @@ func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs
err = dst.Remove(ctx)
}
if err != nil {
fs.CountError(err)
fs.Errorf(dst, "Couldn't %s: %v", action, err)
err = fs.CountError(err)
} else if !fs.Config.DryRun {
fs.Infof(dst, actioned)
}
@@ -685,7 +685,7 @@ func checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHas
if !same {
err = errors.Errorf("%v differ", ht)
fs.Errorf(src, "%v", err)
fs.CountError(err)
_ = fs.CountError(err)
return true, false
}
return false, false
@@ -716,11 +716,14 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
}
err := errors.Errorf("File not in %v", c.fsrc)
fs.Errorf(dst, "%v", err)
fs.CountError(err)
_ = fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.srcFilesMissing, 1)
case fs.Directory:
// Do the same thing to the entire contents of the directory
if c.oneway {
return false
}
return true
default:
panic("Bad object in DirEntries")
@@ -734,7 +737,7 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
case fs.Object:
err := errors.Errorf("File not in %v", c.fdst)
fs.Errorf(src, "%v", err)
fs.CountError(err)
_ = fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.dstFilesMissing, 1)
case fs.Directory:
@@ -756,7 +759,6 @@ func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (di
if sizeDiffers(src, dst) {
err = errors.Errorf("Sizes differ")
fs.Errorf(src, "%v", err)
fs.CountError(err)
return true, false
}
if fs.Config.SizeOnly {
@@ -784,7 +786,7 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b
} else {
err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst)
fs.Errorf(src, "%v", err)
fs.CountError(err)
_ = fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.dstFilesMissing, 1)
}
@@ -796,7 +798,7 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b
}
err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc)
fs.Errorf(dst, "%v", err)
fs.CountError(err)
_ = fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.srcFilesMissing, 1)
@@ -923,7 +925,7 @@ func CheckDownload(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error {
check := func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool) {
differ, err := CheckIdentical(ctx, a, b)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(a, "Failed to download: %v", err)
return true, true
}
@@ -1070,7 +1072,7 @@ func Mkdir(ctx context.Context, f fs.Fs, dir string) error {
fs.Debugf(fs.LogDirName(f, dir), "Making directory")
err := f.Mkdir(ctx, dir)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
return err
}
return nil
@@ -1091,7 +1093,7 @@ func TryRmdir(ctx context.Context, f fs.Fs, dir string) error {
func Rmdir(ctx context.Context, f fs.Fs, dir string) error {
err := TryRmdir(ctx, f, dir)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
return err
}
return err
@@ -1124,7 +1126,7 @@ func Purge(ctx context.Context, f fs.Fs, dir string) error {
err = Rmdirs(ctx, f, dir, false)
}
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
return err
}
return nil
@@ -1167,7 +1169,7 @@ func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
})
if err != nil && err != fs.ErrorDirNotFound {
err = errors.Wrap(err, "failed to list")
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(nil, "%v", err)
}
}()
@@ -1223,7 +1225,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
}
in, err := o.Open(ctx, options...)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(o, "Failed to open: %v", err)
return
}
@@ -1236,7 +1238,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
defer mu.Unlock()
_, err = io.Copy(w, in)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(o, "Failed to send to output: %v", err)
}
})
@@ -1263,7 +1265,7 @@ func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser,
src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst)
if !Equal(ctx, src, dst) {
err = errors.Errorf("corrupted on transfer")
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(dst, "%v", err)
return err
}
@@ -1338,7 +1340,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
dirEmpty[dir] = !leaveRoot
err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
return nil
}
@@ -1385,7 +1387,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
dir := toDelete[i]
err := TryRmdir(ctx, f, dir)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(dir, "Failed to rmdir: %v", err)
return err
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
)
// reOpen is a wrapper for an object reader which reopens the stream on error
@@ -104,7 +105,7 @@ func (h *reOpen) Read(p []byte) (n int, err error) {
h.err = err
}
h.read += int64(n)
if err != nil && err != io.EOF {
if err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {
// close underlying stream
h.opened = false
_ = h.rc.Close()

View File

@@ -168,6 +168,7 @@ func init() {
Title: "Shows the current version of rclone and the go runtime.",
Help: `
This shows the current version of rclone and the go runtime
- version - rclone version, eg "v1.44"
- decomposed - version number as [major, minor, patch, subpatch]
- note patch and subpatch will be 999 for a git compiled version
@@ -260,3 +261,77 @@ func rcQuit(ctx context.Context, in Params) (out Params, err error) {
return nil, nil
}
func init() {
Add(Call{
Path: "debug/set-mutex-profile-fraction",
Fn: rcSetMutexProfileFraction,
Title: "Set runtime.SetMutexProfileFraction for mutex profiling.",
Help: `
SetMutexProfileFraction controls the fraction of mutex contention
events that are reported in the mutex profile. On average 1/rate
events are reported. The previous rate is returned.
To turn off profiling entirely, pass rate 0. To just read the current
rate, pass rate < 0. (For n>1 the details of sampling may change.)
Once this is set you can use this to profile the mutex contention:
go tool pprof http://localhost:5572/debug/pprof/mutex
Parameters
- rate - int
Results
- previousRate - int
`,
})
}
// Set the mutex profile fraction
func rcSetMutexProfileFraction(ctx context.Context, in Params) (out Params, err error) {
rate, err := in.GetInt64("rate")
if err != nil {
return nil, err
}
previousRate := runtime.SetMutexProfileFraction(int(rate))
out = make(Params)
out["previousRate"] = previousRate
return out, nil
}
func init() {
Add(Call{
Path: "debug/set-block-profile-rate",
Fn: rcSetBlockProfileRate,
Title: "Set runtime.SetBlockProfileRate for blocking profiling.",
Help: `
SetBlockProfileRate controls the fraction of goroutine blocking events
that are reported in the blocking profile. The profiler aims to sample
an average of one blocking event per rate nanoseconds spent blocked.
To include every blocking event in the profile, pass rate = 1. To turn
off profiling entirely, pass rate <= 0.
After calling this you can use this to see the blocking profile:
go tool pprof http://localhost:5572/debug/pprof/block
Parameters
- rate - int
`,
})
}
// Set the block profile rate
func rcSetBlockProfileRate(ctx context.Context, in Params) (out Params, err error) {
rate, err := in.GetInt64("rate")
if err != nil {
return nil, err
}
runtime.SetBlockProfileRate(int(rate))
return nil, nil
}

View File
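A hypothetical way to drive the new debug calls from a client, assuming an rc server on the default localhost:5572; the URL path mirrors the call path and the JSON body carries the parameters:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// Enable mutex profiling at a 1-in-10 sampling rate.
	resp, err := http.Post(
		"http://localhost:5572/debug/set-mutex-profile-fraction",
		"application/json",
		strings.NewReader(`{"rate": 10}`),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // e.g. {"previousRate":0}
}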

@@ -237,9 +237,11 @@ func init() {
Fn: rcJobStatus,
Title: "Reads the status of the job ID",
Help: `Parameters
- jobid - id of the job (integer)
Results
- finished - boolean
- duration - time in seconds that the job ran for
- endTime - time the job finished (eg "2018-10-26T18:50:20.528746884+01:00")
@@ -282,6 +284,7 @@ func init() {
Help: `Parameters - None
Results
- jobids - array of integer job ids
`,
})
@@ -300,6 +303,7 @@ func init() {
Fn: rcJobStop,
Title: "Stop the running job",
Help: `Parameters
- jobid - id of the job (integer)
`,
})

View File

@@ -926,7 +926,7 @@ func MoveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, cop
fs.Infof(fdst, "Server side directory move succeeded")
return nil
default:
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(fdst, "Server side directory move failed: %v", err)
return err
}

View File

@@ -490,7 +490,7 @@ func TestSyncIgnoreErrors(t *testing.T) {
)
accounting.GlobalStats().ResetCounters()
fs.CountError(errors.New("boom"))
_ = fs.CountError(errors.New("boom"))
assert.NoError(t, Sync(context.Background(), r.Fremote, r.Flocal, false))
fstest.CheckListingWithPrecision(
@@ -800,7 +800,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
)
accounting.GlobalStats().ResetCounters()
fs.CountError(errors.New("boom"))
_ = fs.CountError(errors.New("boom"))
err := Sync(context.Background(), r.Fremote, r.Flocal, false)
assert.Equal(t, fs.ErrorNotDeleting, err)
@@ -1763,5 +1763,7 @@ func TestAbort(t *testing.T) {
accounting.GlobalStats().ResetCounters()
err := Sync(context.Background(), r.Fremote, r.Flocal, false)
assert.Equal(t, accounting.ErrorMaxTransferLimitReached, err)
expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReached)
fserrors.Count(expectedErr)
assert.Equal(t, expectedErr, err)
}

View File

@@ -1,4 +1,4 @@
package fs
// Version of rclone
var Version = "v1.50.0"
var Version = "v1.50.2-DEV"

View File

@@ -159,7 +159,7 @@ func listRwalk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLe
// Carry on listing but return the error at the end
if err != nil {
listErr = err
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(path, "error listing: %v", err)
return nil
}
@@ -404,7 +404,7 @@ func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel i
// NB once we have passed entries to fn we mustn't touch it again
if err != nil && err != ErrorSkipDir {
traversing.Done()
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(job.remote, "error listing: %v", err)
closeQuit()
// Send error to error channel if space

View File

@@ -10,7 +10,9 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
_ "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fstest/mockdir"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/rclone/rclone/fstest/mockobject"
@@ -18,6 +20,15 @@ import (
"github.com/stretchr/testify/require"
)
var errDirNotFound, errorBoom error
func init() {
errDirNotFound = fserrors.FsError(fs.ErrorDirNotFound)
fserrors.Count(errDirNotFound)
errorBoom = fserrors.FsError(errors.New("boom"))
fserrors.Count(errorBoom)
}
type (
listResult struct {
entries fs.DirEntries
@@ -196,12 +207,12 @@ func TestWalkREmptySkip(t *testing.T) { testWalkEmptySkip(t).WalkR() }
func testWalkNotFound(t *testing.T) *listDirs {
return newListDirs(t, nil, true,
listResults{
"": {err: fs.ErrorDirNotFound},
"": {err: errDirNotFound},
},
errorMap{
"": fs.ErrorDirNotFound,
"": errDirNotFound,
},
fs.ErrorDirNotFound,
errDirNotFound,
)
}
func TestWalkNotFound(t *testing.T) { testWalkNotFound(t).Walk() }
@@ -211,7 +222,7 @@ func TestWalkNotFoundMaskError(t *testing.T) {
// this doesn't work for WalkR
newListDirs(t, nil, true,
listResults{
"": {err: fs.ErrorDirNotFound},
"": {err: errDirNotFound},
},
errorMap{
"": nil,
@@ -224,7 +235,7 @@ func TestWalkNotFoundSkipError(t *testing.T) {
// this doesn't work for WalkR
newListDirs(t, nil, true,
listResults{
"": {err: fs.ErrorDirNotFound},
"": {err: errDirNotFound},
},
errorMap{
"": ErrorSkipDir,
@@ -342,7 +353,7 @@ func testWalkSkip(t *testing.T) *listDirs {
func TestWalkSkip(t *testing.T) { testWalkSkip(t).Walk() }
func TestWalkRSkip(t *testing.T) { testWalkSkip(t).WalkR() }
func testWalkErrors(t *testing.T) *listDirs {
func walkErrors(t *testing.T, expectedErr error) *listDirs {
lr := listResults{}
em := errorMap{}
de := make(fs.DirEntries, 10)
@@ -357,13 +368,20 @@ func testWalkErrors(t *testing.T) *listDirs {
return newListDirs(t, nil, true,
lr,
em,
fs.ErrorDirNotFound,
expectedErr,
).NoCheckMaps()
}
func TestWalkErrors(t *testing.T) { testWalkErrors(t).Walk() }
func TestWalkRErrors(t *testing.T) { testWalkErrors(t).WalkR() }
var errorBoom = errors.New("boom")
func testWalkErrors(t *testing.T) *listDirs {
return walkErrors(t, errDirNotFound)
}
func testWalkRErrors(t *testing.T) *listDirs {
return walkErrors(t, fs.ErrorDirNotFound)
}
func TestWalkErrors(t *testing.T) { testWalkErrors(t).Walk() }
func TestWalkRErrors(t *testing.T) { testWalkRErrors(t).WalkR() }
func makeTree(level int, terminalErrors bool) (listResults, errorMap) {
lr := listResults{}

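The reworked tests above follow from the counting change: walk now hands back the counted error from fs.CountError, so the expected values must be pre-counted sentinels or assert.Equal will see two different wrappers. A fragment mirroring the init block shown earlier:

wantErr := fserrors.FsError(fs.ErrorDirNotFound)
fserrors.Count(wantErr)
// assert.Equal(t, wantErr, err) now matches the error walk returns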
go.mod
View File

@@ -2,35 +2,34 @@ module github.com/rclone/rclone
require (
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669
cloud.google.com/go v0.44.3 // indirect
cloud.google.com/go v0.47.0 // indirect
github.com/Azure/azure-pipeline-go v0.2.2
github.com/Azure/azure-storage-blob-go v0.8.0
github.com/Azure/go-autorest/autorest/adal v0.6.0 // indirect
github.com/Unknwon/goconfig v0.0.0-20190425194916-3dba17dd7b9e
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4
github.com/abbot/go-http-auth v0.4.0
github.com/anacrolix/dms v1.0.0
github.com/anacrolix/dms v1.1.0
github.com/atotto/clipboard v0.1.2
github.com/aws/aws-sdk-go v1.23.8
github.com/billziss-gh/cgofuse v1.1.0
github.com/coreos/bbolt v1.3.3
github.com/aws/aws-sdk-go v1.25.31
github.com/billziss-gh/cgofuse v1.2.0
github.com/djherbis/times v1.2.0
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 // indirect
github.com/goftp/server v0.0.0-20190712054601-1149070ae46b
github.com/etcd-io/bbolt v1.3.3
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
github.com/google/go-cmp v0.3.1 // indirect
github.com/google/go-querystring v1.0.0 // indirect
github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
github.com/hashicorp/golang-lru v0.5.3 // indirect
github.com/jlaffaye/ftp v0.0.0-20190721194432-7cd8b0bcf3fc
github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2
github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
github.com/koofr/go-httpclient v0.0.0-20190818202018-e0dc8fd921dc
github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a
github.com/mattn/go-colorable v0.1.2
github.com/mattn/go-colorable v0.1.4
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
github.com/mattn/go-runewidth v0.0.4
github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9 // indirect
github.com/mattn/go-runewidth v0.0.6
github.com/mitchellh/go-homedir v1.1.0
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
github.com/ncw/swift v1.0.49
@@ -49,24 +48,27 @@ require (
github.com/smartystreets/assertions v1.0.1 // indirect
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.3
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.4.0
github.com/t3rm1n4l/go-mega v0.0.0-20190528125457-55e675378686
github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a
github.com/xanzy/ssh-agent v0.2.1
github.com/youmark/pkcs8 v0.0.0-20181201043747-70daafe5d78a
github.com/yunify/qingstor-sdk-go/v3 v3.0.2
github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60
github.com/yunify/qingstor-sdk-go/v3 v3.1.1
go.etcd.io/bbolt v1.3.3 // indirect
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
go.opencensus.io v0.22.2 // indirect
goftp.io/server v0.0.0-20190812052725-72a57b186803
golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a
golang.org/x/net v0.0.0-20191109021931-daa7c04131f5
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190826163724-acd9dae8e8cc
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056
golang.org/x/text v0.3.2
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
google.golang.org/api v0.9.0
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect
google.golang.org/grpc v1.23.0 // indirect
gopkg.in/yaml.v2 v2.2.2
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
google.golang.org/api v0.13.0
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a // indirect
google.golang.org/grpc v1.25.1 // indirect
gopkg.in/yaml.v2 v2.2.5
)
go 1.13

go.sum
View File

@@ -4,9 +4,19 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.3 h1:0sMegbmn/8uTwpNkB0q9cLEpZ2W5a6kl+wtBQgPWBJQ=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.47.0 h1:1JUtpcY9E7+eTospEwWS2QXP3DEn7poB3E2j0jN74mM=
cloud.google.com/go v0.47.0/go.mod h1:5p3Ky/7f3N10VBkhuR5LFtddroTiMyjZV/Kj5qOQFxU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gitea.com/goftp/file-driver v0.0.0-20190712091345-f79c2ed973f8/go.mod h1:ghdogu0Da3rwYCSJ20JPgTiMcDpzeRbzvuFIOOW3G7w=
gitea.com/goftp/file-driver v0.0.0-20190812052443-efcdcba68b34 h1:3wshUWDKHcy8hrNafCS4rtuAdON2KYsuznc05zdHTrQ=
gitea.com/goftp/file-driver v0.0.0-20190812052443-efcdcba68b34/go.mod h1:6+f1gclV97PmaVmE4YJbH3KIKnl+r3/HWR0zD/z1CG4=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
@@ -37,8 +47,8 @@ github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 h1:mK1/QgFPU4osbhjJ26B1w7
github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/anacrolix/dms v1.0.0 h1:4vs/X5AdF0eRqFXg+EbNUdvY7JUz/a4U84v+VAEa7V8=
github.com/anacrolix/dms v1.0.0/go.mod h1:1TQoem5yf/k/DiVLFFQi+JFQ6GZeKxmJfwGr3goLmFQ=
github.com/anacrolix/dms v1.1.0 h1:vbBXZS7T5FaZm+9p1pdmVVo9tN3qdc27bKSETdeT3xo=
github.com/anacrolix/dms v1.1.0/go.mod h1:msPKAoppoNRfrYplJqx63FZ+VipDZ4Xsj3KzIQxyU7k=
github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
github.com/anacrolix/ffprobe v1.0.0/go.mod h1:BIw+Bjol6CWjm/CRWrVLk2Vy+UYlkgmBZ05vpSYqZPw=
@@ -47,15 +57,14 @@ github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/atotto/clipboard v0.1.2 h1:YZCtFu5Ie8qX2VmVTBnrqLSiU9XOWwqNRmdT3gIQzbY=
github.com/atotto/clipboard v0.1.2/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go v1.23.8 h1:G/azJoBN0pnhB3B+0eeC4yyVFYIIad6bbzg6wwtImqk=
github.com/aws/aws-sdk-go v1.23.8/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/billziss-gh/cgofuse v1.1.0 h1:tATn9ZDvuPcOVlvR4tJitGHgAqy1y18+4mKmRfdfjec=
github.com/billziss-gh/cgofuse v1.1.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/aws/aws-sdk-go v1.25.31 h1:14mdh3HsTgRekePPkYcCbAaEXJknc3mN7f4XfsiMMDA=
github.com/aws/aws-sdk-go v1.25.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/billziss-gh/cgofuse v1.2.0 h1:FMdQSygSBpD4yEPENJcmvfCdmNWMVkPLlD7wWdl/7IA=
github.com/billziss-gh/cgofuse v1.2.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -72,16 +81,24 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible h1:9jnukMIowLSo3SY7+GTwxmYJv4QC0LxXbo97zHWCyoc=
github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i6ybqDybvj4wdIDS4vnD0QEci98PgM8=
github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
github.com/goftp/server v0.0.0-20190712054601-1149070ae46b h1:2rRhW1AEs/240C6fpmgGFKlTnh/339r2Cg+ahrkSodo=
github.com/goftp/server v0.0.0-20190712054601-1149070ae46b/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -104,6 +121,7 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -114,16 +132,15 @@ github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORR
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jlaffaye/ftp v0.0.0-20190721194432-7cd8b0bcf3fc h1:Mc2Gk3kF0Uqx+cI97pN0gbgZb0DVW2L+htrZSKkOmtE=
github.com/jlaffaye/ftp v0.0.0-20190721194432-7cd8b0bcf3fc/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2 h1:WY3P4euRv9s8F2rpZUK1jnk4ZMiV3O2ltdnoZK/GTUU=
github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2/go.mod h1:PwUeyujmhaGohgOf0kJKxPfk3HcRv8QD/wAUN44go4k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -134,6 +151,7 @@ github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6 h1:RyOL4+OIUc
github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
@@ -150,18 +168,18 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-gtk v0.0.0-20190405072524-4deadb416788/go.mod h1:PwzwfeB5syFHXORC3MtPylVcjIoTDT/9cvkKpEndGVI=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-pointer v0.0.0-20180825124634-49522c3f3791/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9 h1:tM1L+QoyOIq/0KiBQ4y/jUW0jxB5kz35bz+PSoQYjq8=
github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9/go.mod h1:cxQpGCW53krnBJYXw0m6SYdk+OIHR4jbEstSUj/+MQ4=
github.com/mattn/go-runewidth v0.0.6 h1:V2iyH+aX9C5fsYCpK60U8BYIvmhqxuOL3JZcqc1NB7k=
github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -170,6 +188,7 @@ github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zX
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
github.com/ncw/swift v1.0.49 h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E=
github.com/ncw/swift v1.0.49/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/nsf/termbox-go v0.0.0-20190817171036-93860e161317 h1:hhGN4SFXgXo61Q4Sjj/X9sBjyeSa2kdpaOzCO+8EVQw=
github.com/nsf/termbox-go v0.0.0-20190817171036-93860e161317/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
@@ -191,10 +210,12 @@ github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/putdotio/go-putio v0.0.0-20190822121956-19b9c636c877 h1:sKIa5MAIViLAnQbEo+uiDi2FMowy8KcdZW8XZpmyNxs=
github.com/putdotio/go-putio v0.0.0-20190822121956-19b9c636c877/go.mod h1:EWtDL88jJLLWZzywr0QaPO+mGP8gFpvl8dcox8qTk3Y=
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46 h1:w2CpS5muK+jyydnmlkqpAhzKmHmMBzBkfYUDjQNS1Dk=
github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
@@ -217,6 +238,8 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -228,39 +251,57 @@ github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709 h1:Ko2LQMrRU+Oy
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/t3rm1n4l/go-mega v0.0.0-20190528125457-55e675378686 h1:U7mF+tjDK9zWoxCU+kBNa1XT7WZMF5bjwtRpjeIkSYw=
github.com/t3rm1n4l/go-mega v0.0.0-20190528125457-55e675378686/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a h1:9VwG6wBA1jd6oOCnmQ/OaKM1GRfChadtH5N3bx1oSKE=
github.com/t3rm1n4l/go-mega v0.0.0-20191014094753-e8695d78299a/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/youmark/pkcs8 v0.0.0-20181201043747-70daafe5d78a h1:wRlvyDgRuJOLgD2vcuBUbEduzTkcN7quLip1EnX/Dl4=
github.com/youmark/pkcs8 v0.0.0-20181201043747-70daafe5d78a/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yunify/qingstor-sdk-go/v3 v3.0.2 h1:2pL3tEj6eEESsHKrqsLZ5D+OkHEhYfsW1xwYRcHCgZs=
github.com/yunify/qingstor-sdk-go/v3 v3.0.2/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60 h1:Ud2neINE1YFEwrcJ4EqnbRZlm9R3T8SuFKeqjIw7k44=
github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yunify/qingstor-sdk-go/v3 v3.1.1 h1:jQkY9N+zSL8h8CqgrDQpXe8/mqJOx8vgGjk6O//RA/4=
github.com/yunify/qingstor-sdk-go/v3 v3.1.1/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
goftp.io/server v0.0.0-20190712054601-1149070ae46b/go.mod h1:xreggPYu7ZuNe9PfbxiQca7bYGwU44IvlCCg3KzWJtQ=
goftp.io/server v0.0.0-20190812034929-9b3874d17690/go.mod h1:99FISrRpwKfaL4Ey/dX8N48WToveng/s2OXR5sJ3cnc=
goftp.io/server v0.0.0-20190812052725-72a57b186803 h1:I2IgXYRuOZ6LceE7VY6aSnYuUy6Wot3WFhqI5WsAHXQ=
goftp.io/server v0.0.0-20190812052725-72a57b186803/go.mod h1:eDjthxa5tFTS2JVry2jHt1g9y3J0Vgu2Nd+lmNWev7Y=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a h1:R/qVym5WAxsZWQqZCwDY/8sdVKV1m1WgU4/S5IRQAzc=
golang.org/x/crypto v0.0.0-20191108234033-bd318be0434a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -274,8 +315,8 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191109021931-daa7c04131f5 h1:bHNaocaoJxYBo5cw41UyTMLjYlb8wPY7+WFrnklbHOM=
golang.org/x/net v0.0.0-20191109021931-daa7c04131f5/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -286,6 +327,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -300,8 +343,9 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826163724-acd9dae8e8cc h1:Cgiu447JccQnHt7K/DbJbw1DbXAUHwOtU7ObeOCVsc4=
golang.org/x/sys v0.0.0-20190826163724-acd9dae8e8cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 h1:dHtDnRWQtSx0Hjq9kvKFpBh9uPPKfQN70NZZmvssGwk=
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
@@ -309,6 +353,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -320,18 +366,28 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191010171213-8abd42400456/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -340,15 +396,22 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@@ -356,8 +419,11 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=


@@ -386,6 +386,8 @@ func doConfig(id, name string, m configmap.Mapper, oauthConfig *oauth2.Config, o
oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
authorizeOnlyValue, ok := m.Get(config.ConfigAuthorize)
authorizeOnly := ok && authorizeOnlyValue != "" // set if being run by "rclone authorize"
authorizeNoAutoBrowserValue, ok := m.Get(config.ConfigAuthNoBrowser)
authorizeNoAutoBrowser := ok && authorizeNoAutoBrowserValue != ""
// See if already have a token
tokenString, ok := m.Get("token")
@@ -470,9 +472,13 @@ func doConfig(id, name string, m configmap.Mapper, oauthConfig *oauth2.Config, o
authURL = "http://" + bindAddress + "/auth?state=" + state
}
// Open the URL for the user to visit
_ = open.Start(authURL)
fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
if !authorizeNoAutoBrowser && oauthConfig.RedirectURL != TitleBarRedirectURL {
// Open the URL for the user to visit
_ = open.Start(authURL)
fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
} else {
fmt.Printf("Please go to the following link: %s\n", authURL)
}
fmt.Printf("Log in and authorize rclone for access\n")
// Read the code via the webserver or manually
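The condition added above gates the browser auto-open on two independent things. A minimal standalone sketch of that predicate (names mirror the hunk; the example values are only illustrative):

package main

import "fmt"

// shouldOpenBrowser mirrors the added condition: auto-open the browser only
// when the user has not set the no-auto-browser option and the flow is not
// using the out-of-band (title-bar) redirect URL.
func shouldOpenBrowser(authorizeNoAutoBrowser bool, redirectURL, titleBarRedirectURL string) bool {
	return !authorizeNoAutoBrowser && redirectURL != titleBarRedirectURL
}

func main() {
	// Local webserver flow, auto-open allowed.
	fmt.Println(shouldOpenBrowser(false, "http://127.0.0.1:53682/", "urn:ietf:wg:oauth:2.0:oob")) // true
	// User opted out: always just print the link.
	fmt.Println(shouldOpenBrowser(true, "http://127.0.0.1:53682/", "urn:ietf:wg:oauth:2.0:oob")) // false
}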

vendor/bazil.org/fuse/fuse.go (generated, vendored)

@@ -1004,7 +1004,7 @@ loop:
}
case opBmap:
panic("opBmap")
goto unrecognized
case opDestroy:
req = &DestroyRequest{


@@ -0,0 +1,12 @@
{
"name": "metadata",
"name_pretty": "Google Compute Engine Metadata API",
"product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata",
"client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata",
"release_level": "ga",
"language": "go",
"repo": "googleapis/google-cloud-go",
"distribution_name": "cloud.google.com/go/compute/metadata",
"api_id": "compute:metadata",
"requires_billing": false
}


@@ -227,6 +227,9 @@ func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Email calls Client.Email on the default client.
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
@@ -367,6 +370,16 @@ func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Email(serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
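The new Email accessor is a thin wrapper over the metadata path instance/service-accounts/<account>/email. A hedged usage sketch; it only returns a value when running on GCE where the metadata server is reachable:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// "" (or "default") selects the instance's main service account.
	email, err := metadata.Email("")
	if err != nil {
		log.Fatalf("not on GCE or metadata server unreachable: %v", err)
	}
	fmt.Println("service account email:", email)
}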


@@ -12,6 +12,7 @@ import (
type Config struct {
Config *aws.Config
Handlers request.Handlers
PartitionID string
Endpoint string
SigningRegion string
SigningName string
@@ -64,7 +65,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
default:
maxRetries := aws.IntValue(cfg.MaxRetries)
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
maxRetries = DefaultRetryerMaxNumRetries
}
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
}


@@ -1,6 +1,7 @@
package client
import (
"math"
"strconv"
"time"
@@ -9,69 +10,142 @@ import (
)
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, implement the
// request.Retryer interface or create a structure type that composes this
// struct and override the specific methods. For example, to override only
// the MaxRetries method:
// most services. If you want to implement custom retry logic, you can implement the
// request.Retryer interface.
//
// type retryer struct {
// client.DefaultRetryer
// }
//
// // This implementation always has 100 max retries
// func (d retryer) MaxRetries() int { return 100 }
type DefaultRetryer struct {
// Num max Retries is the number of max retries that will be performed.
// By default, this is zero.
NumMaxRetries int
// MinRetryDelay is the minimum retry delay after which retry will be performed.
// If not set, the value is 0ns.
MinRetryDelay time.Duration
// MinThrottleRetryDelay is the minimum retry delay when throttled.
// If not set, the value is 0ns.
MinThrottleDelay time.Duration
// MaxRetryDelay is the maximum retry delay before which retry must be performed.
// If not set, the value is 0ns.
MaxRetryDelay time.Duration
// MaxThrottleDelay is the maximum retry delay when throttled.
// If not set, the value is 0ns.
MaxThrottleDelay time.Duration
}
const (
// DefaultRetryerMaxNumRetries sets maximum number of retries
DefaultRetryerMaxNumRetries = 3
// DefaultRetryerMinRetryDelay sets minimum retry delay
DefaultRetryerMinRetryDelay = 30 * time.Millisecond
// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
// DefaultRetryerMaxRetryDelay sets maximum retry delay
DefaultRetryerMaxRetryDelay = 300 * time.Second
// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
DefaultRetryerMaxThrottleDelay = 300 * time.Second
)
// MaxRetries returns the number of maximum returns the service will use to make
// an individual API request.
func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
// setRetryerDefaults sets the default values of the retryer if not set
func (d *DefaultRetryer) setRetryerDefaults() {
if d.MinRetryDelay == 0 {
d.MinRetryDelay = DefaultRetryerMinRetryDelay
}
if d.MaxRetryDelay == 0 {
d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
}
if d.MinThrottleDelay == 0 {
d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
}
if d.MaxThrottleDelay == 0 {
d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
}
}
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
minTime := 30
// if number of max retries is zero, no retries will be performed.
if d.NumMaxRetries == 0 {
return 0
}
// Sets default value for retryer members
d.setRetryerDefaults()
// minDelay is the minimum retryer delay
minDelay := d.MinRetryDelay
var initialDelay time.Duration
isThrottle := r.IsErrorThrottle()
if isThrottle {
if delay, ok := getRetryDelay(r); ok {
return delay
if delay, ok := getRetryAfterDelay(r); ok {
initialDelay = delay
}
minTime = 500
minDelay = d.MinThrottleDelay
}
retryCount := r.RetryCount
if isThrottle && retryCount > 8 {
retryCount = 8
} else if retryCount > 13 {
retryCount = 13
// maxDelay the maximum retryer delay
maxDelay := d.MaxRetryDelay
if isThrottle {
maxDelay = d.MaxThrottleDelay
}
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
return time.Duration(delay) * time.Millisecond
var delay time.Duration
// Logic to cap the retry count based on the minDelay provided
actualRetryCount := int(math.Log2(float64(minDelay))) + 1
if actualRetryCount < 63-retryCount {
delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
if delay > maxDelay {
delay = getJitterDelay(maxDelay / 2)
}
} else {
delay = getJitterDelay(maxDelay / 2)
}
return delay + initialDelay
}
// getJitterDelay returns a jittered delay for retry
func getJitterDelay(duration time.Duration) time.Duration {
return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
}
// ShouldRetry returns true if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
// ShouldRetry returns false if number of max retries is 0.
if d.NumMaxRetries == 0 {
return false
}
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable != nil {
return *r.Retryable
}
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
return true
}
return r.IsErrorRetryable() || r.IsErrorThrottle()
}
// This will look in the Retry-After header, RFC 7231, for how long
// it will wait before attempting another request
func getRetryDelay(r *request.Request) (time.Duration, bool) {
func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
if !canUseRetryAfterHeader(r) {
return 0, false
}
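The reworked RetryRules boils down to delay ≈ (1 << retryCount) * jitter(minDelay), where jitter(d) draws uniformly from [d, 2d), capped by MaxRetryDelay (or MaxThrottleDelay when throttled), with any Retry-After value added on top. A minimal sketch of just the backoff schedule, assuming the default constants above:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter returns a duration in [d, 2d), matching getJitterDelay above.
func jitter(d time.Duration) time.Duration {
	return time.Duration(rand.Int63n(int64(d)) + int64(d))
}

// backoff reproduces the core of the new RetryRules without the SDK types.
func backoff(retryCount int, minDelay, maxDelay time.Duration) time.Duration {
	delay := time.Duration(1<<uint(retryCount)) * jitter(minDelay)
	if delay > maxDelay {
		delay = jitter(maxDelay / 2)
	}
	return delay
}

func main() {
	// With the 30ms default this prints roughly 30-60ms, 60-120ms,
	// 120-240ms, ... until the 300s cap kicks in.
	for i := 0; i < 5; i++ {
		fmt.Printf("retry %d: %v\n", i, backoff(i, 30*time.Millisecond, 300*time.Second))
	}
}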


@@ -5,6 +5,7 @@ type ClientInfo struct {
ServiceName string
ServiceID string
APIVersion string
PartitionID string
Endpoint string
SigningName string
SigningRegion string


@@ -0,0 +1,28 @@
package client
import (
"time"
"github.com/aws/aws-sdk-go/aws/request"
)
// NoOpRetryer provides a retryer that performs no retries.
// It should be used when we do not want retries to be performed.
type NoOpRetryer struct{}
// MaxRetries returns the number of maximum returns the service will use to make
// an individual API; For NoOpRetryer the MaxRetries will always be zero.
func (d NoOpRetryer) MaxRetries() int {
return 0
}
// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
return false
}
// RetryRules returns the delay duration before retrying this request again;
// since NoOpRetryer does not retry, RetryRules always returns 0.
func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
return 0
}
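To disable retries entirely for one client, the new NoOpRetryer can be injected through the config. A hedged sketch wiring it up via request.WithRetryer (changed further down in this diff), with an S3 client chosen only as an example:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	// NoOpRetryer makes ShouldRetry always return false, so every request
	// is attempted exactly once.
	cfg := request.WithRetryer(aws.NewConfig(), client.NoOpRetryer{})
	_ = s3.New(sess, cfg)
}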


@@ -246,6 +246,9 @@ type Config struct {
// Disabling this feature is useful when you want to use local endpoints
// for testing that do not support the modeled host prefix pattern.
DisableEndpointHostPrefix *bool
// STSRegionalEndpoint will enable regional or legacy endpoint resolving
STSRegionalEndpoint endpoints.STSRegionalEndpoint
}
// NewConfig returns a new Config pointer that can be chained with builder
@@ -420,6 +423,13 @@ func (c *Config) MergeIn(cfgs ...*Config) {
}
}
// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
// when resolving the endpoint for a service
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
c.STSRegionalEndpoint = sre
return c
}
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
@@ -520,6 +530,10 @@ func mergeInConfig(dst *Config, other *Config) {
if other.DisableEndpointHostPrefix != nil {
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
}
if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
dst.STSRegionalEndpoint = other.STSRegionalEndpoint
}
}
// Copy will return a shallow copy of the Config object. If any additional
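The new builder slots into the usual Config chaining; a short sketch opting a session into regional STS endpoints in code (region chosen arbitrarily):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	cfg := aws.NewConfig().
		WithRegion("eu-west-1").
		WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint)
	// STS requests through this session should now resolve to the regional
	// endpoint (e.g. sts.eu-west-1.amazonaws.com) instead of the legacy
	// global sts.amazonaws.com.
	_ = session.Must(session.NewSession(cfg))
}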


@@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int {
return dst
}
// Uint returns a pointer to the uint value passed in.
func Uint(v uint) *uint {
return &v
}
// UintValue returns the value of the uint pointer passed in or
// 0 if the pointer is nil.
func UintValue(v *uint) uint {
if v != nil {
return *v
}
return 0
}
// UintSlice converts a slice of uint values uinto a slice of
// uint pointers
func UintSlice(src []uint) []*uint {
dst := make([]*uint, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// UintValueSlice converts a slice of uint pointers uinto a slice of
// uint values
func UintValueSlice(src []*uint) []uint {
dst := make([]uint, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// UintMap converts a string map of uint values uinto a string
// map of uint pointers
func UintMap(src map[string]uint) map[string]*uint {
dst := make(map[string]*uint)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// UintValueMap converts a string map of uint pointers uinto a string
// map of uint values
func UintValueMap(src map[string]*uint) map[string]uint {
dst := make(map[string]uint)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int8 returns a pointer to the int8 value passed in.
func Int8(v int8) *int8 {
return &v
}
// Int8Value returns the value of the int8 pointer passed in or
// 0 if the pointer is nil.
func Int8Value(v *int8) int8 {
if v != nil {
return *v
}
return 0
}
// Int8Slice converts a slice of int8 values into a slice of
// int8 pointers
func Int8Slice(src []int8) []*int8 {
dst := make([]*int8, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int8ValueSlice converts a slice of int8 pointers into a slice of
// int8 values
func Int8ValueSlice(src []*int8) []int8 {
dst := make([]int8, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int8Map converts a string map of int8 values into a string
// map of int8 pointers
func Int8Map(src map[string]int8) map[string]*int8 {
dst := make(map[string]*int8)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int8ValueMap converts a string map of int8 pointers into a string
// map of int8 values
func Int8ValueMap(src map[string]*int8) map[string]int8 {
dst := make(map[string]int8)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int16 returns a pointer to the int16 value passed in.
func Int16(v int16) *int16 {
return &v
}
// Int16Value returns the value of the int16 pointer passed in or
// 0 if the pointer is nil.
func Int16Value(v *int16) int16 {
if v != nil {
return *v
}
return 0
}
// Int16Slice converts a slice of int16 values into a slice of
// int16 pointers
func Int16Slice(src []int16) []*int16 {
dst := make([]*int16, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int16ValueSlice converts a slice of int16 pointers into a slice of
// int16 values
func Int16ValueSlice(src []*int16) []int16 {
dst := make([]int16, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int16Map converts a string map of int16 values into a string
// map of int16 pointers
func Int16Map(src map[string]int16) map[string]*int16 {
dst := make(map[string]*int16)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int16ValueMap converts a string map of int16 pointers into a string
// map of int16 values
func Int16ValueMap(src map[string]*int16) map[string]int16 {
dst := make(map[string]int16)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int32 returns a pointer to the int32 value passed in.
func Int32(v int32) *int32 {
return &v
}
// Int32Value returns the value of the int32 pointer passed in or
// 0 if the pointer is nil.
func Int32Value(v *int32) int32 {
if v != nil {
return *v
}
return 0
}
// Int32Slice converts a slice of int32 values into a slice of
// int32 pointers
func Int32Slice(src []int32) []*int32 {
dst := make([]*int32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int32ValueSlice converts a slice of int32 pointers into a slice of
// int32 values
func Int32ValueSlice(src []*int32) []int32 {
dst := make([]int32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int32Map converts a string map of int32 values into a string
// map of int32 pointers
func Int32Map(src map[string]int32) map[string]*int32 {
dst := make(map[string]*int32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int32ValueMap converts a string map of int32 pointers into a string
// map of int32 values
func Int32ValueMap(src map[string]*int32) map[string]int32 {
dst := make(map[string]int32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
return &v
@@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 {
return dst
}
// Uint8 returns a pointer to the uint8 value passed in.
func Uint8(v uint8) *uint8 {
return &v
}
// Uint8Value returns the value of the uint8 pointer passed in or
// 0 if the pointer is nil.
func Uint8Value(v *uint8) uint8 {
if v != nil {
return *v
}
return 0
}
// Uint8Slice converts a slice of uint8 values into a slice of
// uint8 pointers
func Uint8Slice(src []uint8) []*uint8 {
dst := make([]*uint8, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
// uint8 values
func Uint8ValueSlice(src []*uint8) []uint8 {
dst := make([]uint8, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint8Map converts a string map of uint8 values into a string
// map of uint8 pointers
func Uint8Map(src map[string]uint8) map[string]*uint8 {
dst := make(map[string]*uint8)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint8ValueMap converts a string map of uint8 pointers into a string
// map of uint8 values
func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
dst := make(map[string]uint8)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint16 returns a pointer to the uint16 value passed in.
func Uint16(v uint16) *uint16 {
return &v
}
// Uint16Value returns the value of the uint16 pointer passed in or
// 0 if the pointer is nil.
func Uint16Value(v *uint16) uint16 {
if v != nil {
return *v
}
return 0
}
// Uint16Slice converts a slice of uint16 values into a slice of
// uint16 pointers
func Uint16Slice(src []uint16) []*uint16 {
dst := make([]*uint16, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
// uint16 values
func Uint16ValueSlice(src []*uint16) []uint16 {
dst := make([]uint16, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint16Map converts a string map of uint16 values into a string
// map of uint16 pointers
func Uint16Map(src map[string]uint16) map[string]*uint16 {
dst := make(map[string]*uint16)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint16ValueMap converts a string map of uint16 pointers into a string
// map of uint16 values
func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
dst := make(map[string]uint16)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint32 returns a pointer to the uint32 value passed in.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint32Value returns the value of the uint32 pointer passed in or
// 0 if the pointer is nil.
func Uint32Value(v *uint32) uint32 {
if v != nil {
return *v
}
return 0
}
// Uint32Slice converts a slice of uint32 values into a slice of
// uint32 pointers
func Uint32Slice(src []uint32) []*uint32 {
dst := make([]*uint32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
// uint32 values
func Uint32ValueSlice(src []*uint32) []uint32 {
dst := make([]uint32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint32Map converts a string map of uint32 values into a string
// map of uint32 pointers
func Uint32Map(src map[string]uint32) map[string]*uint32 {
dst := make(map[string]*uint32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint32ValueMap converts a string map of uint32 pointers into a string
// map of uint32 values
func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
dst := make(map[string]uint32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint64 returns a pointer to the uint64 value passed in.
func Uint64(v uint64) *uint64 {
return &v
}
// Uint64Value returns the value of the uint64 pointer passed in or
// 0 if the pointer is nil.
func Uint64Value(v *uint64) uint64 {
if v != nil {
return *v
}
return 0
}
// Uint64Slice converts a slice of uint64 values into a slice of
// uint64 pointers
func Uint64Slice(src []uint64) []*uint64 {
dst := make([]*uint64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
// uint64 values
func Uint64ValueSlice(src []*uint64) []uint64 {
dst := make([]uint64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint64Map converts a string map of uint64 values into a string
// map of uint64 pointers
func Uint64Map(src map[string]uint64) map[string]*uint64 {
dst := make(map[string]*uint64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint64ValueMap converts a string map of uint64 pointers into a string
// map of uint64 values
func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
dst := make(map[string]uint64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float32 returns a pointer to the float32 value passed in.
func Float32(v float32) *float32 {
return &v
}
// Float32Value returns the value of the float32 pointer passed in or
// 0 if the pointer is nil.
func Float32Value(v *float32) float32 {
if v != nil {
return *v
}
return 0
}
// Float32Slice converts a slice of float32 values into a slice of
// float32 pointers
func Float32Slice(src []float32) []*float32 {
dst := make([]*float32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float32ValueSlice converts a slice of float32 pointers into a slice of
// float32 values
func Float32ValueSlice(src []*float32) []float32 {
dst := make([]float32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float32Map converts a string map of float32 values into a string
// map of float32 pointers
func Float32Map(src map[string]float32) map[string]*float32 {
dst := make(map[string]*float32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float32ValueMap converts a string map of float32 pointers into a string
// map of float32 values
func Float32ValueMap(src map[string]*float32) map[string]float32 {
dst := make(map[string]float32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
return &v
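All of these additions follow the SDK's existing pointer/value convention for optional fields; assuming they land in the aws package as upstream does, usage is a simple round trip:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	p := aws.Uint32(42)               // *uint32 for an optional field
	fmt.Println(aws.Uint32Value(p))   // 42
	fmt.Println(aws.Uint32Value(nil)) // 0 when the field is unset
}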


@@ -16,25 +16,26 @@ var (
type metricChan struct {
ch chan metric
paused int64
paused *int64
}
func newMetricChan(size int) metricChan {
return metricChan{
ch: make(chan metric, size),
ch: make(chan metric, size),
paused: new(int64),
}
}
func (ch *metricChan) Pause() {
atomic.StoreInt64(&ch.paused, pausedEnum)
atomic.StoreInt64(ch.paused, pausedEnum)
}
func (ch *metricChan) Continue() {
atomic.StoreInt64(&ch.paused, runningEnum)
atomic.StoreInt64(ch.paused, runningEnum)
}
func (ch *metricChan) IsPaused() bool {
v := atomic.LoadInt64(&ch.paused)
v := atomic.LoadInt64(ch.paused)
return v == pausedEnum
}
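The switch from int64 to *int64 matters because metricChan is copied by value: with a plain field each copy gets its own pause flag, while a shared pointer keeps Pause/Continue visible across copies (and sidesteps 64-bit alignment problems with atomics on copied structs). A small sketch of the sharing behavior using the same shape:

package main

import (
	"fmt"
	"sync/atomic"
)

type metricChan struct {
	paused *int64 // shared by every copy of the struct
}

func main() {
	ch := metricChan{paused: new(int64)}
	cp := ch // the copy shares the same *int64
	atomic.StoreInt64(ch.paused, 1)
	fmt.Println(atomic.LoadInt64(cp.paused) == 1) // true: the copy observes the pause
}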


@@ -66,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
XAmzRequestID: aws.String(r.RequestID),
AttemptCount: aws.Int(r.RetryCount + 1),
AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
AccessKey: aws.String(creds.AccessKeyID),
}


@@ -152,18 +152,19 @@ type EC2IAMInfo struct {
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
// an instance identity document
type EC2InstanceIdentityDocument struct {
DevpayProductCodes []string `json:"devpayProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
DevpayProductCodes []string `json:"devpayProductCodes"`
MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
}
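The struct mirrors the JSON shape of the EC2 instance identity document; this hunk only adds marketplaceProductCodes and realigns the fields. A hedged decoding sketch with a trimmed-down struct and an inline sample document:

package main

import (
	"encoding/json"
	"fmt"
)

// identityDoc is a cut-down version of EC2InstanceIdentityDocument, for
// illustration only.
type identityDoc struct {
	Region                  string   `json:"region"`
	InstanceID              string   `json:"instanceId"`
	MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
}

func main() {
	raw := []byte(`{"region":"us-east-1","instanceId":"i-0abc","marketplaceProductCodes":["prod123"]}`)
	var doc identityDoc
	if err := json.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", doc)
}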

File diff suppressed because it is too large


@@ -3,6 +3,7 @@ package endpoints
import (
"fmt"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
)
@@ -46,6 +47,43 @@ type Options struct {
//
// This option is ignored if StrictMatching is enabled.
ResolveUnknownService bool
// STS Regional Endpoint flag helps with resolving the STS endpoint
STSRegionalEndpoint STSRegionalEndpoint
}
// STSRegionalEndpoint is an enum type alias for int
// It is used internally by the core sdk as STS Regional Endpoint flag value
type STSRegionalEndpoint int
const (
// UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
UnsetSTSEndpoint STSRegionalEndpoint = iota
// LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
// to use legacy endpoints.
LegacySTSEndpoint
// RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
// to use regional endpoints.
RegionalSTSEndpoint
)
// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based
// on the input string provided in env config or shared config by the user.
//
// `legacy`, `regional` are the only case-insensitive valid strings for
// resolving the STS regional Endpoint flag.
func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
switch {
case strings.EqualFold(s, "legacy"):
return LegacySTSEndpoint, nil
case strings.EqualFold(s, "regional"):
return RegionalSTSEndpoint, nil
default:
return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
}
}
// Set combines all of the option functions together.
@@ -79,6 +117,12 @@ func ResolveUnknownServiceOption(o *Options) {
o.ResolveUnknownService = true
}
// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
// STS endpoint to their regional endpoint, instead of the global endpoint.
func STSRegionalEndpointOption(o *Options) {
o.STSRegionalEndpoint = RegionalSTSEndpoint
}
// A Resolver provides the interface for functionality to resolve endpoints.
// The build in Partition and DefaultResolver return value satisfy this interface.
type Resolver interface {
@@ -194,7 +238,7 @@ func (p Partition) ID() string { return p.id }
// require the provided service and region to be known by the partition.
// If the endpoint cannot be strictly resolved an error will be returned. This
// mode is useful to ensure the endpoint resolved is valid. Without
// StrictMatching enabled the endpoint returned my look valid but may not work.
// StrictMatching enabled the endpoint returned may look valid but may not work.
// StrictMatching requires the SDK to be updated if you want to take advantage
// of new regions and services expansions.
//
@@ -350,6 +394,9 @@ type ResolvedEndpoint struct {
// The endpoint URL
URL string
// The endpoint partition
PartitionID string
// The region that should be used for signing requests.
SigningRegion string
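GetSTSRegionalEndpoint is the single parsing entry point shared by the env var and the shared-config key. A quick sketch of its accepted inputs:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// "legacy" and "regional" parse case-insensitively; anything else
	// returns UnsetSTSEndpoint together with an error.
	for _, s := range []string{"legacy", "REGIONAL", "bogus"} {
		v, err := endpoints.GetSTSRegionalEndpoint(s)
		fmt.Println(s, v, err)
	}
}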


@@ -0,0 +1,19 @@
package endpoints
var stsLegacyGlobalRegions = map[string]struct{}{
"ap-northeast-1": {},
"ap-south-1": {},
"ap-southeast-1": {},
"ap-southeast-2": {},
"ca-central-1": {},
"eu-central-1": {},
"eu-north-1": {},
"eu-west-1": {},
"eu-west-2": {},
"eu-west-3": {},
"sa-east-1": {},
"us-east-1": {},
"us-east-2": {},
"us-west-1": {},
"us-west-2": {},
}


@@ -75,24 +75,55 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool)
return p.RegionRegex.MatchString(region)
}
func allowLegacyEmptyRegion(service string) bool {
legacy := map[string]struct{}{
"budgets": {},
"ce": {},
"chime": {},
"cloudfront": {},
"ec2metadata": {},
"iam": {},
"importexport": {},
"organizations": {},
"route53": {},
"sts": {},
"support": {},
"waf": {},
}
_, allowed := legacy[service]
return allowed
}
func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
var opt Options
opt.Set(opts...)
s, hasService := p.Services[service]
if !(hasService || opt.ResolveUnknownService) {
if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
// Only return error if the resolver will not fallback to creating
// endpoint based on service endpoint ID passed in.
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
}
if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 {
region = s.PartitionEndpoint
}
if service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint {
if _, ok := stsLegacyGlobalRegions[region]; ok {
region = "aws-global"
}
}
e, hasEndpoint := s.endpointForRegion(region)
if !hasEndpoint && opt.StrictMatching {
if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) {
return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
}
defs := []endpoint{p.Defaults, s.Defaults}
return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil
}
func serviceList(ss services) []string {
@@ -201,7 +232,7 @@ func getByPriority(s []string, p []string, def string) string {
return s[0]
}
func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
var merged endpoint
for _, def := range defs {
merged.mergeIn(def)
@@ -237,6 +268,7 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op
return ResolvedEndpoint{
URL: u,
PartitionID: partitionID,
SigningRegion: signingRegion,
SigningName: signingName,
SigningNameDerived: signingNameDerived,
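Putting the resolver pieces together: unless the flag is RegionalSTSEndpoint, an STS lookup from one of the legacy regions listed earlier is rewritten to aws-global. A hedged sketch against the default resolver (exact URLs depend on the bundled endpoint data):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	r := endpoints.DefaultResolver()

	legacy, _ := r.EndpointFor("sts", "us-west-2")
	regional, _ := r.EndpointFor("sts", "us-west-2", endpoints.STSRegionalEndpointOption)

	fmt.Println(legacy.URL)   // expected: https://sts.amazonaws.com (aws-global rewrite)
	fmt.Println(regional.URL) // expected: https://sts.us-west-2.amazonaws.com
}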


@@ -23,7 +23,7 @@ type Handlers struct {
Complete HandlerList
}
// Copy returns of this handler's lists.
// Copy returns a copy of this handler's lists.
func (h *Handlers) Copy() Handlers {
return Handlers{
Validate: h.Validate.copy(),
@@ -42,7 +42,7 @@ func (h *Handlers) Copy() Handlers {
}
}
// Clear removes callback functions for all handlers
// Clear removes callback functions for all handlers.
func (h *Handlers) Clear() {
h.Validate.Clear()
h.Build.Clear()


@@ -99,8 +99,12 @@ type Operation struct {
BeforePresignFn func(r *Request) error
}
// New returns a new Request pointer for the service API
// operation and parameters.
// New returns a new Request pointer for the service API operation and
// parameters.
//
// A Retryer should be provided to direct how the request is retried. If
// Retryer is nil, a default no retry value will be used. You can use
// NoOpRetryer in the Client package to disable retry behavior directly.
//
// Params is any value of input parameters to be the request payload.
// Data is pointer value to an object which the request's response
@@ -108,6 +112,10 @@ type Operation struct {
func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
if retryer == nil {
retryer = noOpRetryer{}
}
method := operation.HTTPMethod
if method == "" {
method = "POST"


@@ -35,10 +35,41 @@ type Retryer interface {
}
// WithRetryer sets a Retryer value to the given Config returning the Config
// value for chaining.
// value for chaining. The value must not be nil.
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
if retryer == nil {
if cfg.Logger != nil {
cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
}
retryer = noOpRetryer{}
}
cfg.Retryer = retryer
return cfg
}
// noOpRetryer is a internal no op retryer used when a request is created
// without a retryer.
//
// Provides a retryer that performs no retries.
// It should be used when we do not want retries to be performed.
type noOpRetryer struct{}
// MaxRetries returns the number of maximum returns the service will use to make
// an individual API; For NoOpRetryer the MaxRetries will always be zero.
func (d noOpRetryer) MaxRetries() int {
return 0
}
// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
func (d noOpRetryer) ShouldRetry(_ *Request) bool {
return false
}
// RetryRules returns the delay duration before retrying this request again;
// since NoOpRetryer does not retry, RetryRules always returns 0.
func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
return 0
}
// retryableCodes is a collection of service response codes which are retry-able
@@ -94,10 +125,6 @@ var validParentCodes = map[string]struct{}{
ErrCodeRead: {},
}
type temporaryError interface {
Temporary() bool
}
func isNestedErrorRetryable(parentErr awserr.Error) bool {
if parentErr == nil {
return false
@@ -116,7 +143,7 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
return isCodeRetryable(aerr.Code())
}
if t, ok := err.(temporaryError); ok {
if t, ok := err.(temporary); ok {
return t.Temporary() || isErrConnectionReset(err)
}
@@ -223,6 +250,16 @@ func (r *Request) IsErrorRetryable() bool {
return true
}
// HTTP response status code 501 should not be retried.
// 501 represents Not Implemented which means the request method is not
// supported by the server and cannot be handled.
if r.HTTPResponse != nil {
// HTTP response status code 500 represents internal server error and
// should be retried without any throttle.
if r.HTTPResponse.StatusCode == 500 {
return true
}
}
return IsErrorRetryable(r.Error)
}
@@ -237,7 +274,11 @@ func (r *Request) IsErrorThrottle() bool {
if r.HTTPResponse != nil {
switch r.HTTPResponse.StatusCode {
case 429, 502, 503, 504:
case
429, // error caused due to too many requests
502, // Bad Gateway error should be throttled
503, // caused when service is unavailable
504: // error occurred due to gateway timeout
return true
}
}
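Summing up the status-code policy after this hunk: 500 is retried outright, 501 is never retried, and 429/502/503/504 are treated as throttle errors (retried with the throttle delays). A tiny classification sketch, independent of the SDK types:

package main

import "fmt"

// classify mirrors the status-code handling above.
func classify(status int) (retryable, throttle bool) {
	switch {
	case status == 500:
		return true, false
	case status == 429 || status == 502 || status == 503 || status == 504:
		return true, true
	default:
		return false, false // includes 501 Not Implemented
	}
}

func main() {
	for _, s := range []int{429, 500, 501, 503} {
		r, t := classify(s)
		fmt.Printf("%d retryable=%v throttle=%v\n", s, r, t)
	}
}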


@@ -1,12 +1,14 @@
package session
import (
"fmt"
"os"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
)
// EnvProviderName provides a name of the provider when config is loaded from environment.
@@ -125,6 +127,12 @@ type envConfig struct {
//
// AWS_ROLE_SESSION_NAME=session_name
RoleSessionName string
// Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service
//
// AWS_STS_REGIONAL_ENDPOINTS =sts_regional_endpoint
// This can take value as `regional` or `legacy`
STSRegionalEndpoint endpoints.STSRegionalEndpoint
}
var (
@@ -179,6 +187,9 @@ var (
roleSessionNameEnvKey = []string{
"AWS_ROLE_SESSION_NAME",
}
stsRegionalEndpointKey = []string{
"AWS_STS_REGIONAL_ENDPOINTS",
}
)
// loadEnvConfig retrieves the SDK's environment configuration.
@@ -187,7 +198,7 @@ var (
// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
// the shared SDK config will be loaded in addition to the SDK's specific
// configuration values.
func loadEnvConfig() envConfig {
func loadEnvConfig() (envConfig, error) {
enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
return envConfigLoad(enableSharedConfig)
}
@@ -198,11 +209,11 @@ func loadEnvConfig() envConfig {
// Loads the shared configuration in addition to the SDK's specific configuration.
// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
// environment variable is set.
func loadSharedEnvConfig() envConfig {
func loadSharedEnvConfig() (envConfig, error) {
return envConfigLoad(true)
}
func envConfigLoad(enableSharedConfig bool) envConfig {
func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
cfg := envConfig{}
cfg.EnableSharedConfig = enableSharedConfig
@@ -264,12 +275,23 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
return cfg
// STS Regional Endpoint variable
for _, k := range stsRegionalEndpointKey {
if v := os.Getenv(k); len(v) != 0 {
STSRegionalEndpoint, err := endpoints.GetSTSRegionalEndpoint(v)
if err != nil {
return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
}
cfg.STSRegionalEndpoint = STSRegionalEndpoint
}
}
return cfg, nil
}
func setFromEnvVal(dst *string, keys []string) {
for _, k := range keys {
if v := os.Getenv(k); len(v) > 0 {
if v := os.Getenv(k); len(v) != 0 {
*dst = v
break
}
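From the caller's side the whole env-config change is one variable; a hedged sketch exercising it in-process (equivalent to export AWS_STS_REGIONAL_ENDPOINTS=regional in the shell):

package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	os.Setenv("AWS_STS_REGIONAL_ENDPOINTS", "regional")

	// With loadEnvConfig now returning an error, a malformed value (e.g.
	// "bogus") should surface here instead of being silently ignored.
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	_ = sess
}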


@@ -73,7 +73,7 @@ type Session struct {
// func is called instead of waiting to receive an error until a request is made.
func New(cfgs ...*aws.Config) *Session {
// load initial config from environment
envCfg := loadEnvConfig()
envCfg, envErr := loadEnvConfig()
if envCfg.EnableSharedConfig {
var cfg aws.Config
@@ -93,17 +93,17 @@ func New(cfgs ...*aws.Config) *Session {
// Session creation failed, need to report the error and prevent
// any requests from succeeding.
s = &Session{Config: defaults.Config()}
s.Config.MergeIn(cfgs...)
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
s.Handlers.Validate.PushBack(func(r *request.Request) {
r.Error = err
})
s.logDeprecatedNewSessionError(msg, err, cfgs)
}
return s
}
s := deprecatedNewSession(cfgs...)
if envErr != nil {
msg := "failed to load env config"
s.logDeprecatedNewSessionError(msg, envErr, cfgs)
}
if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
if l := s.Config.Logger; l != nil {
@@ -112,11 +112,8 @@ func New(cfgs ...*aws.Config) *Session {
} else if csmCfg.Enabled {
err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
if err != nil {
err = fmt.Errorf("failed to enable CSM, %v", err)
s.Config.Logger.Log("ERROR:", err.Error())
s.Handlers.Validate.PushBack(func(r *request.Request) {
r.Error = err
})
msg := "failed to enable CSM"
s.logDeprecatedNewSessionError(msg, err, cfgs)
}
}
@@ -136,7 +133,7 @@ func New(cfgs ...*aws.Config) *Session {
// to be built with retrieving credentials with AssumeRole set in the config.
//
// See the NewSessionWithOptions func for information on how to override or
// control through code how the Session will be created. Such as specifying the
// control through code how the Session will be created, such as specifying the
// config profile, and controlling if shared config is enabled or not.
func NewSession(cfgs ...*aws.Config) (*Session, error) {
opts := Options{}
@@ -279,10 +276,17 @@ type Options struct {
// }))
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
var err error
if opts.SharedConfigState == SharedConfigEnable {
envCfg = loadSharedEnvConfig()
envCfg, err = loadSharedEnvConfig()
if err != nil {
return nil, fmt.Errorf("failed to load shared config, %v", err)
}
} else {
envCfg = loadEnvConfig()
envCfg, err = loadEnvConfig()
if err != nil {
return nil, fmt.Errorf("failed to load environment config, %v", err)
}
}
if len(opts.Profile) != 0 {
@@ -550,6 +554,9 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
}
}
// Regional Endpoint flag for STS endpoint resolving
mergeSTSRegionalEndpointConfig(cfg, envCfg, sharedCfg)
// Configure credentials if not already set by the user when creating the
// Session.
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
@@ -563,6 +570,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
return nil
}
// mergeSTSRegionalEndpointConfig function merges the STSRegionalEndpoint into cfg from
// envConfig and SharedConfig with envConfig being given precedence over SharedConfig
func mergeSTSRegionalEndpointConfig(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
cfg.STSRegionalEndpoint = envCfg.STSRegionalEndpoint
if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint {
cfg.STSRegionalEndpoint = sharedCfg.STSRegionalEndpoint
}
if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint {
cfg.STSRegionalEndpoint = endpoints.LegacySTSEndpoint
}
return nil
}
func initHandlers(s *Session) {
// Add the Validate parameter handler if it is not disabled.
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
@@ -571,7 +594,7 @@ func initHandlers(s *Session) {
}
}
// Copy creates and returns a copy of the current Session, coping the config
// Copy creates and returns a copy of the current Session, copying the config
// and handlers. If any additional configs are provided they will be merged
// on top of the Session's copied config.
//
@@ -591,37 +614,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
// ClientConfig satisfies the client.ConfigProvider interface and is used to
// configure the service client instances. Passing the Session to the service
// client's constructor (New) will use this method to configure the client.
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
// Backwards compatibility, the error will be eaten if user calls ClientConfig
// directly. All SDK services will use ClientconfigWithError.
cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
return cfg
}
func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config {
s = s.Copy(cfgs...)
var resolved endpoints.ResolvedEndpoint
var err error
region := aws.StringValue(s.Config.Region)
if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
resolved.SigningRegion = region
} else {
resolved, err = s.Config.EndpointResolver.EndpointFor(
serviceName, region,
func(opt *endpoints.Options) {
opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
// Support the condition where the service is modeled but its
// endpoint metadata is not available.
opt.ResolveUnknownService = true
},
)
resolved, err := s.resolveEndpoint(service, region, s.Config)
if err != nil && s.Config.Logger != nil {
s.Config.Logger.Log(fmt.Sprintf(
"ERROR: unable to resolve endpoint for service %q, region %q, err: %v",
service, region, err))
}
return client.Config{
@@ -631,7 +632,37 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (
SigningRegion: resolved.SigningRegion,
SigningNameDerived: resolved.SigningNameDerived,
SigningName: resolved.SigningName,
}, err
}
}
func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {
return endpoints.ResolvedEndpoint{
URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)),
SigningRegion: region,
}, nil
}
resolved, err := cfg.EndpointResolver.EndpointFor(service, region,
func(opt *endpoints.Options) {
opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
// Support for STSRegionalEndpoint where the STSRegionalEndpoint is
// provided in envConfig or sharedConfig with envConfig getting
// precedence.
opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint
// Support the condition where the service is modeled but its
// endpoint metadata is not available.
opt.ResolveUnknownService = true
},
)
if err != nil {
return endpoints.ResolvedEndpoint{}, err
}
return resolved, nil
}
// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
@@ -641,12 +672,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
s = s.Copy(cfgs...)
var resolved endpoints.ResolvedEndpoint
region := aws.StringValue(s.Config.Region)
if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
resolved.SigningRegion = region
resolved.SigningRegion = aws.StringValue(s.Config.Region)
}
return client.Config{
@@ -658,3 +686,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf
SigningName: resolved.SigningName,
}
}
// logDeprecatedNewSessionError function enables error handling for session
func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
// Session creation failed, need to report the error and prevent
// any requests from succeeding.
s.Config.MergeIn(cfgs...)
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
s.Handlers.Validate.PushBack(func(r *request.Request) {
r.Error = err
})
}
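The companion change in NewSessionWithOptions means load failures are returned to the caller rather than only logged; a short sketch of the recommended construction path:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess, err := session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	})
	if err != nil {
		// Bad env or shared-config values (including the new STS regional
		// endpoint flag) fail fast here.
		log.Fatal(err)
	}
	_ = sess
}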


@@ -5,6 +5,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/internal/ini"
)
@@ -40,6 +41,9 @@ const (
// Web Identity Token File
webIdentityTokenFileKey = `web_identity_token_file` // optional
// Additional config fields for regional or legacy endpoints
stsRegionalEndpointSharedKey = `sts_regional_endpoints`
// DefaultSharedConfigProfile is the default profile to be used when
// loading configuration from the config files if another profile name
// is not provided.
@@ -82,12 +86,17 @@ type sharedConfig struct {
//
// endpoint_discovery_enabled = true
EnableEndpointDiscovery *bool
// CSM Options
CSMEnabled *bool
CSMHost string
CSMPort string
CSMClientID string
// Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service
//
// sts_regional_endpoints = sts_regional_endpoint
// This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint`
STSRegionalEndpoint endpoints.STSRegionalEndpoint
}
type sharedConfigFile struct {
@@ -244,8 +253,16 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
updateString(&cfg.SourceProfileName, section, sourceProfileKey)
updateString(&cfg.CredentialSource, section, credentialSourceKey)
updateString(&cfg.Region, section, regionKey)
if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
sre, err := endpoints.GetSTSRegionalEndpoint(v)
if err != nil {
return fmt.Errorf("failed to load %s from shared config, %s, %v",
stsRegionalEndpointKey, file.Filename, err)
}
cfg.STSRegionalEndpoint = sre
}
}
updateString(&cfg.CredentialProcess, section, credentialProcessKey)
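The shared-config counterpart is the sts_regional_endpoints key; a minimal example profile (file layout assumed to be the usual ~/.aws/config, accepted values per GetSTSRegionalEndpoint above):

[profile myprofile]
region = eu-west-1
sts_regional_endpoints = regional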


@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.23.8"
const SDKVersion = "1.25.31"


@@ -162,7 +162,7 @@ loop:
if len(tokens) == 0 {
break loop
}
// if should skip is true, we skip the tokens until should skip is set to false.
step = SkipTokenState
}
@@ -218,7 +218,7 @@ loop:
// S -> equal_expr' expr_stmt'
switch k.Kind {
case ASTKindEqualExpr:
// assiging a value to some key
// assigning a value to some key
k.AppendChild(newExpression(tok))
stack.Push(newExprStatement(k))
case ASTKindExpr:
@@ -250,6 +250,13 @@ loop:
if !runeCompare(tok.Raw(), openBrace) {
return nil, NewParseError("expected '['")
}
// If OpenScopeState is not at the start, we must mark the previous ast as complete
//
// for example: if previous ast was a skip statement;
// we should mark it as complete before we create a new statement
if k.Kind != ASTKindStart {
stack.MarkComplete(k)
}
stmt := newStatement()
stack.Push(stmt)

Some files were not shown because too many files have changed in this diff.